Dataset columns:

| column | type | range |
|---|---|---|
| code | string | 86 – 54.5k chars |
| code_codestyle | int64 | 0 – 371 |
| style_context | string | 87 – 49.2k chars |
| style_context_codestyle | int64 | 0 – 349 |
| label | int64 | 0 – 1 |
from ..utils import DummyObject, requires_backends


# NOTE: the original class names were obfuscated in the source. The six names
# below are reconstructed from the diffusers ONNX dummy-object module and may
# not match the original file exactly. Each class is a stub that raises an
# informative error unless the torch, transformers, and onnx backends exist.
class OnnxStableDiffusionImg2ImgPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionInpaintPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionInpaintPipelineLegacy(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionUpscalePipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class StableDiffusionOnnxPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
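
# Illustrative sketch of how these stubs behave (assuming the usual
# `requires_backends` implementation, which raises an ImportError that
# names the missing packages):
#
#     try:
#         OnnxStableDiffusionPipeline()
#     except ImportError as err:
#         print(err)  # lists the backends that must be installed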
| code_codestyle = 295 |
def one_pence() -> int:
    return 1


def two_pence(x: int) -> int:
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x: int) -> int:
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(x: int = 200) -> int:
    """Count the ways to make `x` pence from standard UK coins (Project Euler 31)."""
    return two_pound(x)


if __name__ == "__main__":
    print(solution(int(input().strip())))
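
# Illustrative alternative (not part of the upstream solution): the same count
# computed with a classic dynamic-programming table, avoiding deep recursion.
def solution_dp(target: int = 200) -> int:
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    ways = [1] + [0] * target  # ways[v] = number of ways to make v pence so far
    for coin in coins:
        for value in range(coin, target + 1):
            ways[value] += ways[value - coin]
    return ways[target]


assert solution_dp(200) == solution() == 73682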
| style_context_codestyle = 295 | label = 1 |
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig


logger = logging.get_logger(__name__)

DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Intel/dpt-large": "https://huggingface.co/Intel/dpt-large/resolve/main/config.json",
    # See all DPT models at https://huggingface.co/models?filter=dpt
}


class DPTConfig(PretrainedConfig):
    model_type = "dpt"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=384,
        patch_size=16,
        num_channels=3,
        is_hybrid=False,
        qkv_bias=True,
        backbone_out_indices=[2, 5, 8, 11],
        readout_type="project",
        reassemble_factors=[4, 2, 1, 0.5],
        neck_hidden_sizes=[96, 192, 384, 768],
        fusion_hidden_size=256,
        head_in_index=-1,
        use_batch_norm_in_fusion_residual=False,
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        semantic_loss_ignore_index=255,
        semantic_classifier_dropout=0.1,
        backbone_featmap_shape=[1, 1024, 24, 24],
        neck_ignore_stages=[0, 1],
        backbone_config=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid

        if self.is_hybrid:
            if backbone_config is None:
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = {
                    "global_padding": "same",
                    "layer_type": "bottleneck",
                    "depths": [3, 4, 9],
                    "out_features": ["stage1", "stage2", "stage3"],
                    "embedding_dynamic_padding": True,
                }
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, dict):
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, PretrainedConfig):
                pass
            else:
                raise ValueError(
                    f"backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}."
                )
            self.backbone_config = backbone_config
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages

            if readout_type != "project":
                raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode.")
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []

        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']")
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual

        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
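
# Illustrative usage sketch (assuming the class is exposed as
# `transformers.DPTConfig`): hybrid mode builds a default BiT backbone config.
if __name__ == "__main__":
    config = DPTConfig(is_hybrid=True)
    print(config.backbone_config.layer_type)  # "bottleneck"
    print(config.to_dict()["model_type"])  # "dpt"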
| code_codestyle = 295 |
def binomial_coefficient(n: int, r: int) -> int:
    """Compute C(n, r) with Pascal's rule, using a single rolling row."""
    c = [0 for i in range(r + 1)]
    # nC0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute the current row from the previous row
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
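
# Quick sanity check against the standard library (illustrative only):
import math

assert binomial_coefficient(n=10, r=5) == math.comb(10, 5) == 252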
| style_context_codestyle = 295 | label = 1 |
from collections import defaultdict

from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst


def test_prim_successful_result() -> None:
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    adjacency = defaultdict(list)
    for node1, node2, cost in edges:
        adjacency[node1].append([node2, cost])
        adjacency[node2].append([node1, cost])

    result = mst(adjacency)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    # every expected edge must be present in the result, in either direction
    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
| code_codestyle = 295 |
import math

import torch
from torch import nn

from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin


class T5FilmDecoder(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        input_dims: int = 128,
        targets_length: int = 256,
        max_decoder_noise_time: float = 2000.0,
        d_model: int = 768,
        num_layers: int = 12,
        num_heads: int = 12,
        d_kv: int = 64,
        d_ff: int = 2048,
        dropout_rate: float = 0.1,
    ):
        super().__init__()

        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False),
            nn.SiLU(),
            nn.Linear(d_model * 4, d_model * 4, bias=False),
            nn.SiLU(),
        )

        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)

        self.dropout = nn.Dropout(p=dropout_rate)

        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)

        self.decoder_norm = T5LayerNorm(d_model)

        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)

    def encoder_decoder_mask(self, query_input, key_input):
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)

    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)

        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time,
            embedding_dim=self.config.d_model,
            max_period=self.config.max_decoder_noise_time,
        ).to(dtype=self.dtype)

        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)

        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)

        seq_length = decoder_input_tokens.shape[1]

        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device),
            (batch, seq_length),
        )

        position_encodings = self.position_encoding(decoder_positions)

        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)

        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype
        )

        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]

        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)

        for lyr in self.decoders:
            y = lyr(
                y,
                conditioning_emb=conditioning_emb,
                encoder_hidden_states=encoded,
                encoder_attention_mask=encoder_decoder_mask,
            )[0]

        y = self.decoder_norm(y)
        y = self.post_dropout(y)

        spec_out = self.spec_out(y)
        return spec_out


class DecoderLayer(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()

        # cond self attention: layer 0
        self.layer.append(
            T5LayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate)
        )

        # cross attention: layer 1
        self.layer.append(
            T5LayerCrossAttention(
                d_model=d_model,
                d_kv=d_kv,
                num_heads=num_heads,
                dropout_rate=dropout_rate,
                layer_norm_epsilon=layer_norm_epsilon,
            )
        )

        # Film Cond MLP + dropout: last layer
        self.layer.append(
            T5LayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon)
        )

    def forward(
        self,
        hidden_states,
        conditioning_emb=None,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        encoder_decoder_position_bias=None,
    ):
        hidden_states = self.layer[0](
            hidden_states,
            conditioning_emb=conditioning_emb,
            attention_mask=attention_mask,
        )

        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype
            )

            hidden_states = self.layer[1](
                hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_extended_attention_mask,
            )

        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)

        return (hidden_states,)


class T5LayerSelfAttentionCond(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = T5LayerNorm(d_model)
        self.FiLMLayer = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None):
        # pre_self_attention_layer_norm
        normed_hidden_states = self.layer_norm(hidden_states)

        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)

        # Self-attention block
        attention_output = self.attention(normed_hidden_states)

        hidden_states = hidden_states + self.dropout(attention_output)

        return hidden_states


class T5LayerCrossAttention(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states,
            encoder_hidden_states=key_value_states,
            attention_mask=attention_mask.squeeze(1),
        )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output


class T5LayerFFCond(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = T5DenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)

        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states


class T5DenseGatedActDense(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states


class T5LayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
        # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
        # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
        # half-precision inputs is done in fp32
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)

        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)

        return self.weight * hidden_states


class NewGELUActivation(nn.Module):
    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))


class T5FiLMLayer(nn.Module):
    """FiLM layer: feature-wise linear modulation, x * (1 + scale) + shift."""

    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
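
# Illustrative numerical sketch of the two building blocks above: the T5-style
# RMSNorm scales by the root-mean-square (no mean subtraction, no bias), and
# FiLM applies x * (1 + scale) + shift predicted from a conditioning embedding.
if __name__ == "__main__":
    x = torch.randn(2, 4, 8)
    norm = T5LayerNorm(8)
    manual = x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + 1e-6)
    assert torch.allclose(norm(x), manual, atol=1e-5)

    film = T5FiLMLayer(in_features=32, out_features=8)
    out = film(x, torch.randn(2, 1, 32))  # same shape as x, feature-wise modulated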
| style_context_codestyle = 295 | label = 1 |
from __future__ import annotations

from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray


def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    """Resolve a force given in polar form into its (x, y) components."""
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(
    forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1
) -> bool:
    """Check if a system of forces is in static equilibrium (net moment close to zero)."""
    # summation of moments is zero
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps


if __name__ == "__main__":
    # Test to check if it works
    forces = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
    assert in_static_equilibrium(forces, location)

    import doctest

    doctest.testmod()
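
    # Illustrative extra check: two equal and opposite forces applied at the
    # same point produce no net moment, so the system is in equilibrium.
    balanced_forces = array([[10, 0], [-10, 0]])
    same_point = array([[0, 0], [0, 0]])
    assert in_static_equilibrium(balanced_forces, same_point)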
| code_codestyle = 295 |
from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
    # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}


class GPTNeoConfig(PretrainedConfig):
    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=50257,
        max_position_embeddings=2048,
        hidden_size=2048,
        num_layers=24,
        attention_types=[[["global", "local"], 12]],
        num_heads=16,
        intermediate_size=None,
        window_size=256,
        activation_function="gelu_new",
        resid_dropout=0.0,
        embed_dropout=0.0,
        attention_dropout=0.0,
        classifier_dropout=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)

        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument."
            )

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @staticmethod
    def expand_attention_types_params(attention_types):
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
def custom_unfold(input, dimension, size, step):
    """Custom `torch.Tensor.unfold` implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]

    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))

    return sliced.permute(perm)
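
# Quick check of the claim that `custom_unfold` mirrors `torch.Tensor.unfold`
# (illustrative only):
#
#     >>> import torch
#     >>> x = torch.arange(10)
#     >>> torch.equal(custom_unfold(x, dimension=0, size=4, step=2), x.unfold(0, 4, 2))
#     True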
def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """Returns the largest divisor of `seq_length` below `window_size`, and the resulting number of blocks,
    in an ONNX-exportable way."""
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")
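
# Illustrative example: for a sequence of length 10 and a window size of 4,
# the largest divisor below the window size is 2, giving 5 blocks.
#
#     >>> import torch
#     >>> block_length, num_blocks = custom_get_block_length_and_num_blocks(torch.tensor(10), 4)
#     >>> (block_length.item(), num_blocks.item())
#     (2, 5)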
class GPTNeoOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
| style_context_codestyle = 295 | label = 1 |
from math import factorial


def solution(num: int = 100) -> int:
    """Return the sum of the digits of num! (Project Euler problem 20)."""
    return sum(map(int, str(factorial(num))))


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
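
    # Worked examples (illustrative): 10! = 3628800, whose digits sum to 27,
    # and solution(100) reproduces the well-known Project Euler 20 answer.
    assert solution(10) == 27
    assert solution(100) == 648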
| code_codestyle = 295 |
from cv2 import destroyAllWindows, imread, imshow, waitKey


def convert_to_negative(img):
    # getting number of pixels in the image
    pixel_h, pixel_v = img.shape[0], img.shape[1]

    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [255, 255, 255] - img[i][j]

    return img


if __name__ == "__main__":
    # read original image
    img = imread("image_data/lena.jpg", 1)

    # convert to its negative
    neg = convert_to_negative(img)

    # show result image
    imshow("negative of original image", img)
    waitKey(0)
    destroyAllWindows()
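
# Illustrative note: the per-pixel loop above is simple but slow; with NumPy
# the same inversion is one vectorized expression (assuming an 8-bit image).
import numpy as np


def convert_to_negative_fast(img: np.ndarray) -> np.ndarray:
    return 255 - img  # broadcasts over every pixel and channel at once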
| style_context_codestyle = 295 | label = 1 |
import argparse

from torch import nn

# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
    ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
    XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)

from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging


logger = logging.get_logger(__name__)
logging.set_verbosity_info()


def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    """Copy/tweak the old checkpoint's weights into the current ProphetNet structure."""
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    special_keys = ["key_proj", "value_proj", "query_proj"]

    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        old_attribute = ""  # guard for attributes that never hit the mapping
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
            if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)

                # when old_attribute is "" we stay at the current old_model
                if old_attribute != "":
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--prophetnet_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
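
# Typical invocation (illustrative; the script filename is assumed from the
# transformers repository, and both paths are placeholders):
#   python convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py \
#       --prophetnet_checkpoint_path <old_checkpoint_dir> \
#       --pytorch_dump_folder_path <output_dir>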
| code_codestyle = 295 |
import argparse
import json

import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download

from transformers import (
    VideoMAEConfig,
    VideoMAEForPreTraining,
    VideoMAEForVideoClassification,
    VideoMAEImageProcessor,
)


def get_videomae_config(model_name):
    config = VideoMAEConfig()

    set_architecture_configs(model_name, config)

    if "finetuned" not in model_name:
        config.use_mean_pooling = False

    if "finetuned" in model_name:
        repo_id = "huggingface/label-files"
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = "kinetics400-id2label.json"
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = "something-something-v2-id2label.json"
        else:
            raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.")
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config


def set_architecture_configs(model_name, config):
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError('Model name should include either "small", "base", "large", or "huge"')


def rename_key(name):
    if "encoder." in name:
        name = name.replace("encoder.", "")
    if "cls_token" in name:
        name = name.replace("cls_token", "videomae.embeddings.cls_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "videomae.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "videomae.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "videomae.embeddings.norm")
    if "decoder.blocks" in name:
        name = name.replace("decoder.blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "videomae.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name and "bias" not in name:
        name = name.replace("attn", "attention.self")
    if "attn" in name:
        name = name.replace("attn", "attention.attention")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.weight", "videomae.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.bias", "videomae.layernorm.bias")
    if "head" in name and "decoder" not in name:
        name = name.replace("head", "classifier")

    return name


def convert_state_dict(orig_state_dict, config):
    # NOTE: the destination key strings below are reconstructed from the
    # upstream VideoMAE conversion script; the obfuscated source dropped them.
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if key.startswith("encoder."):
            key = key.replace("encoder.", "")

        if "qkv" in key:
            key_split = key.split(".")
            if key.startswith("decoder.blocks"):
                dim = config.decoder_hidden_size
                layer_num = int(key_split[2])
                prefix = "decoder.decoder_layers."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                dim = config.hidden_size
                layer_num = int(key_split[1])
                prefix = "videomae.encoder.layer."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)


def convert_videomae_checkpoint(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub):
    config = get_videomae_config(model_name)

    if "finetuned" in model_name:
        model = VideoMAEForVideoClassification(config)
    else:
        model = VideoMAEForPreTraining(config)

    # download original checkpoint, hosted on Google Drive
    output = "pytorch_model.bin"
    gdown.cached_download(checkpoint_url, output, quiet=False)
    files = torch.load(output, map_location="cpu")
    if "model" in files:
        state_dict = files["model"]
    else:
        state_dict = files["module"]
    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    # verify model on basic input
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
    video = prepare_video()
    inputs = image_processor(video, return_tensors="pt")

    if "finetuned" not in model_name:
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

    outputs = model(**inputs)
    logits = outputs.logits

    model_names = [
        "videomae-small-finetuned-kinetics",
        "videomae-small-finetuned-ssv2",
        # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
        "videomae-base-short",
        "videomae-base-short-finetuned-kinetics",
        "videomae-base",
        "videomae-base-finetuned-kinetics",
        "videomae-large",
        "videomae-large-finetuned-kinetics",
        "videomae-huge-finetuned-kinetics",
        # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
        "videomae-base-short-ssv2",
        "videomae-base-short-finetuned-ssv2",
        "videomae-base-ssv2",
        "videomae-base-finetuned-ssv2",
    ]

    # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
    if model_name == "videomae-small-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([-0.9291, -0.4061, -0.9307])
    elif model_name == "videomae-small-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.2671, -0.4689, -0.8235])
    elif model_name == "videomae-base":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]])
    elif model_name == "videomae-base-short":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]])
        # we verified the loss both for normalized and unnormalized targets for this one
        expected_loss = torch.tensor([0.5142]) if config.norm_pix_loss else torch.tensor([0.6469])
    elif model_name == "videomae-large":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]])
    elif model_name == "videomae-large-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.0771, 0.0011, -0.3625])
    elif model_name == "videomae-huge-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.2433, 0.1632, -0.4894])
    elif model_name == "videomae-base-short-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.6588, 0.0990, -0.2493])
    elif model_name == "videomae-base-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421])
    elif model_name == "videomae-base-short-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]])
    elif model_name == "videomae-base-short-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([-0.0537, -0.1539, -0.3266])
    elif model_name == "videomae-base-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]])
    elif model_name == "videomae-base-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.1961, -0.8337, -0.6389])
    else:
        raise ValueError(f"Model name not supported. Should be one of {model_names}")

    # verify logits
    assert logits.shape == expected_shape
    if "finetuned" in model_name:
        assert torch.allclose(logits[0, :3], expected_slice, atol=1e-4)
    else:
        print("Logits:", logits[0, :3, :3])
        assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)
    print("Logits ok!")

    # verify loss, if applicable
    if model_name == "videomae-base-short":
        loss = outputs.loss
        assert torch.allclose(loss, expected_loss, atol=1e-4)
        print("Loss ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        model.push_to_hub(model_name, organization="nielsr")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4",
        type=str,
        help=(
            "URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"
            " download link."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/Users/nielsrogge/Documents/VideoMAE/Test",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--model_name", default="videomae-base", type=str, help="Name of the model.")
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
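
# Typical invocation (illustrative; paths are placeholders, and the default
# --checkpoint_url above points at the videomae-base checkpoint):
#   python <this_script>.py --model_name videomae-base \
#       --pytorch_dump_folder_path <output_dir> --push_to_hub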
| style_context_codestyle = 295 | label = 1 |
import math
import sys


def minimum_squares_to_represent_a_number(number: int) -> int:
    """Count the minimum number of perfect squares that sum to `number`."""
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
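
    # Worked examples (illustrative): 12 = 4 + 4 + 4 needs three squares and a
    # perfect square needs one; by Lagrange's four-square theorem the result
    # never exceeds 4 for any natural number.
    assert minimum_squares_to_represent_a_number(12) == 3
    assert minimum_squares_to_represent_a_number(25) == 1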
| code_codestyle = 295 |
from typing import Optional

import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import (
    BackboneOutput,
    BaseModelOutputWithNoAttention,
    BaseModelOutputWithPoolingAndNoAttention,
    ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/resnet-50",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]


class ResNetConvLayer(nn.Module):
    def __init__(
        self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu"
    ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetEmbeddings(nn.Module):
    """ResNet embeddings (stem): one aggressive convolution followed by max pooling."""

    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act
        )
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.num_channels = config.num_channels

    def forward(self, pixel_values: Tensor) -> Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding


class ResNetShortCut(nn.Module):
    """Projects residual features to the correct size and, if needed, downsamples the input with `stride=2`."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state


class ResNetBasicLayer(nn.Module):
    """A classic ResNet residual layer composed of two `3x3` convolutions."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu"):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride),
            ResNetConvLayer(out_channels, out_channels, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetBottleNeckLayer(nn.Module):
    """A ResNet bottleneck layer: a `1x1` reduction, a `3x3` convolution, and a `1x1` expansion."""

    def __init__(
        self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4
    ):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1),
            ResNetConvLayer(reduces_channels, reduces_channels, stride=stride),
            ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetStage(nn.Module):
    """A ResNet stage composed of stacked layers."""

    def __init__(self, config: ResNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
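
# Illustrative shape check (assuming the reconstructed class names above):
#
#     >>> import torch
#     >>> cfg = ResNetConfig(layer_type="basic")
#     >>> stage = ResNetStage(cfg, in_channels=64, out_channels=128, stride=2, depth=2)
#     >>> stage(torch.randn(1, 64, 56, 56)).shape
#     torch.Size([1, 128, 28, 28])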
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ ) -> int:
super().__init__()
_snake_case = nn.ModuleList([] )
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
lowerCAmelCase_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
_snake_case = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(lowerCAmelCase_ , config.depths[1:] ):
self.stages.append(ResNetStage(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , depth=lowerCAmelCase_ ) )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = False , lowerCAmelCase_ = True ) -> BaseModelOutputWithNoAttention:
_snake_case = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_snake_case = hidden_states + (hidden_state,)
_snake_case = stage_module(lowerCAmelCase_ )
if output_hidden_states:
_snake_case = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(
last_hidden_state=lowerCAmelCase_ , hidden_states=lowerCAmelCase_ , )
class UpperCamelCase_ ( _lowerCamelCase ):
lowerCAmelCase_ = ResNetConfig
lowerCAmelCase_ = '''resnet'''
lowerCAmelCase_ = '''pixel_values'''
lowerCAmelCase_ = True
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Dict:
if isinstance(lowerCAmelCase_ , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' )
elif isinstance(lowerCAmelCase_ , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=False ) -> List[Any]:
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case = value
UpperCAmelCase_ = R"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
UpperCAmelCase_ = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
'''The bare ResNet model outputting raw features without any specific head on top.''' , _lowerCamelCase , )
class UpperCamelCase_ ( _lowerCamelCase ):
def __init__( self , lowerCAmelCase_ ) -> int:
super().__init__(lowerCAmelCase_ )
_snake_case = config
_snake_case = ResNetEmbeddings(lowerCAmelCase_ )
_snake_case = ResNetEncoder(lowerCAmelCase_ )
_snake_case = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCAmelCase_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCAmelCase_ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None ) -> BaseModelOutputWithPoolingAndNoAttention:
_snake_case = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_snake_case = return_dict if return_dict is not None else self.config.use_return_dict
_snake_case = self.embedder(lowerCAmelCase_ )
_snake_case = self.encoder(
lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , return_dict=lowerCAmelCase_ )
_snake_case = encoder_outputs[0]
_snake_case = self.pooler(lowerCAmelCase_ )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowerCAmelCase_ , pooler_output=lowerCAmelCase_ , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
'''
ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''' , _lowerCamelCase , )
class UpperCamelCase_ ( _lowerCamelCase ):
def __init__( self , lowerCAmelCase_ ) -> Union[str, Any]:
super().__init__(lowerCAmelCase_ )
_snake_case = config.num_labels
_snake_case = ResNetModel(lowerCAmelCase_ )
# classification head
_snake_case = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCAmelCase_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCAmelCase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def lowerCAmelCase ( self , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , ) -> ImageClassifierOutputWithNoAttention:
_snake_case = return_dict if return_dict is not None else self.config.use_return_dict
_snake_case = self.resnet(lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , return_dict=lowerCAmelCase_ )
_snake_case = outputs.pooler_output if return_dict else outputs[1]
_snake_case = self.classifier(lowerCAmelCase_ )
_snake_case = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_snake_case = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_snake_case = 'single_label_classification'
else:
_snake_case = 'multi_label_classification'
if self.config.problem_type == "regression":
_snake_case = MSELoss()
if self.num_labels == 1:
_snake_case = loss_fct(logits.squeeze() , labels.squeeze() )
else:
_snake_case = loss_fct(lowerCAmelCase_ , lowerCAmelCase_ )
elif self.config.problem_type == "single_label_classification":
_snake_case = CrossEntropyLoss()
_snake_case = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_snake_case = BCEWithLogitsLoss()
_snake_case = loss_fct(lowerCAmelCase_ , lowerCAmelCase_ )
if not return_dict:
_snake_case = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=lowerCAmelCase_ , logits=lowerCAmelCase_ , hidden_states=outputs.hidden_states )
@add_start_docstrings(
'''
ResNet backbone, to be used with frameworks like DETR and MaskFormer.
''' , _lowerCamelCase , )
class UpperCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
def __init__( self , lowerCAmelCase_ ) -> List[Any]:
super().__init__(lowerCAmelCase_ )
super()._init_backbone(lowerCAmelCase_ )
_snake_case = [config.embedding_size] + config.hidden_sizes
_snake_case = ResNetEmbeddings(lowerCAmelCase_ )
_snake_case = ResNetEncoder(lowerCAmelCase_ )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCAmelCase_ )
@replace_return_docstrings(output_type=lowerCAmelCase_ , config_class=_CONFIG_FOR_DOC )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None ) -> BackboneOutput:
_snake_case = return_dict if return_dict is not None else self.config.use_return_dict
_snake_case = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_snake_case = self.embedder(lowerCAmelCase_ )
_snake_case = self.encoder(lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , return_dict=lowerCAmelCase_ )
_snake_case = outputs.hidden_states
_snake_case = ()
for idx, stage in enumerate(self.stage_names ):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
_snake_case = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=lowerCAmelCase_ , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=lowerCAmelCase_ , )
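# Illustrative usage sketch for the model classes above, via the public
# transformers API they correspond to (ResNetModel / ResNetForImageClassification).
# The checkpoint name and example image URL are assumptions for the demo.
#
# from transformers import AutoImageProcessor, ResNetForImageClassification
# from PIL import Image
# import requests, torch
#
# url = "http://images.cocodataset.org/val2017/000000039769.jpg"
# image = Image.open(requests.get(url, stream=True).raw)
# processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
# model = ResNetForImageClassification.from_pretrained("microsoft/resnet-50")
# with torch.no_grad():
#     logits = model(**processor(image, return_tensors="pt")).logits
# print(model.config.id2label[logits.argmax(-1).item()])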
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
UpperCAmelCase_ = {
"""Acehnese Arabic""": """ace_Arab""",
"""Acehnese Latin""": """ace_Latn""",
"""Mesopotamian Arabic""": """acm_Arab""",
"""Ta'izzi-Adeni Arabic""": """acq_Arab""",
"""Tunisian Arabic""": """aeb_Arab""",
"""Afrikaans""": """afr_Latn""",
"""South Levantine Arabic""": """ajp_Arab""",
"""Akan""": """aka_Latn""",
"""Amharic""": """amh_Ethi""",
"""North Levantine Arabic""": """apc_Arab""",
"""Modern Standard Arabic""": """arb_Arab""",
"""Modern Standard Arabic Romanized""": """arb_Latn""",
"""Najdi Arabic""": """ars_Arab""",
"""Moroccan Arabic""": """ary_Arab""",
"""Egyptian Arabic""": """arz_Arab""",
"""Assamese""": """asm_Beng""",
"""Asturian""": """ast_Latn""",
"""Awadhi""": """awa_Deva""",
"""Central Aymara""": """ayr_Latn""",
"""South Azerbaijani""": """azb_Arab""",
"""North Azerbaijani""": """azj_Latn""",
"""Bashkir""": """bak_Cyrl""",
"""Bambara""": """bam_Latn""",
"""Balinese""": """ban_Latn""",
"""Belarusian""": """bel_Cyrl""",
"""Bemba""": """bem_Latn""",
"""Bengali""": """ben_Beng""",
"""Bhojpuri""": """bho_Deva""",
"""Banjar Arabic""": """bjn_Arab""",
"""Banjar Latin""": """bjn_Latn""",
"""Standard Tibetan""": """bod_Tibt""",
"""Bosnian""": """bos_Latn""",
"""Buginese""": """bug_Latn""",
"""Bulgarian""": """bul_Cyrl""",
"""Catalan""": """cat_Latn""",
"""Cebuano""": """ceb_Latn""",
"""Czech""": """ces_Latn""",
"""Chokwe""": """cjk_Latn""",
"""Central Kurdish""": """ckb_Arab""",
"""Crimean Tatar""": """crh_Latn""",
"""Welsh""": """cym_Latn""",
"""Danish""": """dan_Latn""",
"""German""": """deu_Latn""",
"""Southwestern Dinka""": """dik_Latn""",
"""Dyula""": """dyu_Latn""",
"""Dzongkha""": """dzo_Tibt""",
"""Greek""": """ell_Grek""",
"""English""": """eng_Latn""",
"""Esperanto""": """epo_Latn""",
"""Estonian""": """est_Latn""",
"""Basque""": """eus_Latn""",
"""Ewe""": """ewe_Latn""",
"""Faroese""": """fao_Latn""",
"""Fijian""": """fij_Latn""",
"""Finnish""": """fin_Latn""",
"""Fon""": """fon_Latn""",
"""French""": """fra_Latn""",
"""Friulian""": """fur_Latn""",
"""Nigerian Fulfulde""": """fuv_Latn""",
"""Scottish Gaelic""": """gla_Latn""",
"""Irish""": """gle_Latn""",
"""Galician""": """glg_Latn""",
"""Guarani""": """grn_Latn""",
"""Gujarati""": """guj_Gujr""",
"""Haitian Creole""": """hat_Latn""",
"""Hausa""": """hau_Latn""",
"""Hebrew""": """heb_Hebr""",
"""Hindi""": """hin_Deva""",
"""Chhattisgarhi""": """hne_Deva""",
"""Croatian""": """hrv_Latn""",
"""Hungarian""": """hun_Latn""",
"""Armenian""": """hye_Armn""",
"""Igbo""": """ibo_Latn""",
"""Ilocano""": """ilo_Latn""",
"""Indonesian""": """ind_Latn""",
"""Icelandic""": """isl_Latn""",
"""Italian""": """ita_Latn""",
"""Javanese""": """jav_Latn""",
"""Japanese""": """jpn_Jpan""",
"""Kabyle""": """kab_Latn""",
"""Jingpho""": """kac_Latn""",
"""Kamba""": """kam_Latn""",
"""Kannada""": """kan_Knda""",
"""Kashmiri Arabic""": """kas_Arab""",
"""Kashmiri Devanagari""": """kas_Deva""",
"""Georgian""": """kat_Geor""",
"""Central Kanuri Arabic""": """knc_Arab""",
"""Central Kanuri Latin""": """knc_Latn""",
"""Kazakh""": """kaz_Cyrl""",
"""Kabiyè""": """kbp_Latn""",
"""Kabuverdianu""": """kea_Latn""",
"""Khmer""": """khm_Khmr""",
"""Kikuyu""": """kik_Latn""",
"""Kinyarwanda""": """kin_Latn""",
"""Kyrgyz""": """kir_Cyrl""",
"""Kimbundu""": """kmb_Latn""",
"""Northern Kurdish""": """kmr_Latn""",
"""Kikongo""": """kon_Latn""",
"""Korean""": """kor_Hang""",
"""Lao""": """lao_Laoo""",
"""Ligurian""": """lij_Latn""",
"""Limburgish""": """lim_Latn""",
"""Lingala""": """lin_Latn""",
"""Lithuanian""": """lit_Latn""",
"""Lombard""": """lmo_Latn""",
"""Latgalian""": """ltg_Latn""",
"""Luxembourgish""": """ltz_Latn""",
"""Luba-Kasai""": """lua_Latn""",
"""Ganda""": """lug_Latn""",
"""Luo""": """luo_Latn""",
"""Mizo""": """lus_Latn""",
"""Standard Latvian""": """lvs_Latn""",
"""Magahi""": """mag_Deva""",
"""Maithili""": """mai_Deva""",
"""Malayalam""": """mal_Mlym""",
"""Marathi""": """mar_Deva""",
"""Minangkabau Arabic """: """min_Arab""",
"""Minangkabau Latin""": """min_Latn""",
"""Macedonian""": """mkd_Cyrl""",
"""Plateau Malagasy""": """plt_Latn""",
"""Maltese""": """mlt_Latn""",
"""Meitei Bengali""": """mni_Beng""",
"""Halh Mongolian""": """khk_Cyrl""",
"""Mossi""": """mos_Latn""",
"""Maori""": """mri_Latn""",
"""Burmese""": """mya_Mymr""",
"""Dutch""": """nld_Latn""",
"""Norwegian Nynorsk""": """nno_Latn""",
"""Norwegian Bokmål""": """nob_Latn""",
"""Nepali""": """npi_Deva""",
"""Northern Sotho""": """nso_Latn""",
"""Nuer""": """nus_Latn""",
"""Nyanja""": """nya_Latn""",
"""Occitan""": """oci_Latn""",
"""West Central Oromo""": """gaz_Latn""",
"""Odia""": """ory_Orya""",
"""Pangasinan""": """pag_Latn""",
"""Eastern Panjabi""": """pan_Guru""",
"""Papiamento""": """pap_Latn""",
"""Western Persian""": """pes_Arab""",
"""Polish""": """pol_Latn""",
"""Portuguese""": """por_Latn""",
"""Dari""": """prs_Arab""",
"""Southern Pashto""": """pbt_Arab""",
"""Ayacucho Quechua""": """quy_Latn""",
"""Romanian""": """ron_Latn""",
"""Rundi""": """run_Latn""",
"""Russian""": """rus_Cyrl""",
"""Sango""": """sag_Latn""",
"""Sanskrit""": """san_Deva""",
"""Santali""": """sat_Olck""",
"""Sicilian""": """scn_Latn""",
"""Shan""": """shn_Mymr""",
"""Sinhala""": """sin_Sinh""",
"""Slovak""": """slk_Latn""",
"""Slovenian""": """slv_Latn""",
"""Samoan""": """smo_Latn""",
"""Shona""": """sna_Latn""",
"""Sindhi""": """snd_Arab""",
"""Somali""": """som_Latn""",
"""Southern Sotho""": """sot_Latn""",
"""Spanish""": """spa_Latn""",
"""Tosk Albanian""": """als_Latn""",
"""Sardinian""": """srd_Latn""",
"""Serbian""": """srp_Cyrl""",
"""Swati""": """ssw_Latn""",
"""Sundanese""": """sun_Latn""",
"""Swedish""": """swe_Latn""",
"""Swahili""": """swh_Latn""",
"""Silesian""": """szl_Latn""",
"""Tamil""": """tam_Taml""",
"""Tatar""": """tat_Cyrl""",
"""Telugu""": """tel_Telu""",
"""Tajik""": """tgk_Cyrl""",
"""Tagalog""": """tgl_Latn""",
"""Thai""": """tha_Thai""",
"""Tigrinya""": """tir_Ethi""",
"""Tamasheq Latin""": """taq_Latn""",
"""Tamasheq Tifinagh""": """taq_Tfng""",
"""Tok Pisin""": """tpi_Latn""",
"""Tswana""": """tsn_Latn""",
"""Tsonga""": """tso_Latn""",
"""Turkmen""": """tuk_Latn""",
"""Tumbuka""": """tum_Latn""",
"""Turkish""": """tur_Latn""",
"""Twi""": """twi_Latn""",
"""Central Atlas Tamazight""": """tzm_Tfng""",
"""Uyghur""": """uig_Arab""",
"""Ukrainian""": """ukr_Cyrl""",
"""Umbundu""": """umb_Latn""",
"""Urdu""": """urd_Arab""",
"""Northern Uzbek""": """uzn_Latn""",
"""Venetian""": """vec_Latn""",
"""Vietnamese""": """vie_Latn""",
"""Waray""": """war_Latn""",
"""Wolof""": """wol_Latn""",
"""Xhosa""": """xho_Latn""",
"""Eastern Yiddish""": """ydd_Hebr""",
"""Yoruba""": """yor_Latn""",
"""Yue Chinese""": """yue_Hant""",
"""Chinese Simplified""": """zho_Hans""",
"""Chinese Traditional""": """zho_Hant""",
"""Standard Malay""": """zsm_Latn""",
"""Zulu""": """zul_Latn""",
}
class UpperCamelCase_ ( _lowerCamelCase ):
lowerCAmelCase_ = '''facebook/nllb-200-distilled-600M'''
lowerCAmelCase_ = (
'''This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '''
'''be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '''
        '''which should be the language of the desired output. Both `src_lang` and `tgt_lang` are written in '''
'''plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'''
)
lowerCAmelCase_ = '''translator'''
lowerCAmelCase_ = AutoTokenizer
lowerCAmelCase_ = AutoModelForSeqaSeqLM
lowerCAmelCase_ = LANGUAGE_CODES
lowerCAmelCase_ = ['''text''', '''text''', '''text''']
lowerCAmelCase_ = ['''text''']
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Tuple:
if src_lang not in self.lang_to_code:
raise ValueError(F'''{src_lang} is not a supported language.''' )
if tgt_lang not in self.lang_to_code:
raise ValueError(F'''{tgt_lang} is not a supported language.''' )
_snake_case = self.lang_to_code[src_lang]
_snake_case = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
lowerCAmelCase_ , return_tensors='pt' , src_lang=lowerCAmelCase_ , tgt_lang=lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Optional[int]:
return self.model.generate(**lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> int:
return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=lowerCAmelCase_ )
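# Illustrative sketch of what the tool above runs end to end, written against
# the public transformers API. `_build_translation_inputs` is the same private
# tokenizer helper the tool calls above; the example sentence is an assumption.
#
# from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
#
# tokenizer = AutoTokenizer.from_pretrained("facebook/nllb-200-distilled-600M")
# model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-distilled-600M")
# inputs = tokenizer._build_translation_inputs(
#     "Bonjour, le monde !", return_tensors="pt", src_lang="fra_Latn", tgt_lang="eng_Latn"
# )
# outputs = model.generate(**inputs)
# print(tokenizer.decode(outputs[0].tolist(), skip_special_tokens=True))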
def valid_connection(
    graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]
) -> bool:
    '''simple docstring'''
    # 1. Validate that current and next vertices are connected
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False
    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)
def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    '''simple docstring'''
    # Base case: the path already contains every vertex
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1
    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False
def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    '''simple docstring'''
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate; return the path if a Hamiltonian cycle exists, otherwise an empty list
    return path if util_hamilton_cycle(graph, path, 1) else []
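if __name__ == "__main__":
    # Worked example (illustrative): the adjacency matrix of a 5-vertex graph
    # that contains a Hamiltonian cycle; this prints [0, 1, 2, 4, 3, 0].
    example_graph = [
        [0, 1, 0, 1, 0],
        [1, 0, 1, 1, 1],
        [0, 1, 0, 0, 1],
        [1, 1, 0, 0, 1],
        [0, 1, 1, 1, 0],
    ]
    print(hamilton_cycle(example_graph))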
from math import asin, atan, cos, radians, sin, sqrt, tan
AXIS_A = 6_37_81_37.0
AXIS_B = 6_35_67_52.31_42_45
RADIUS = 6378137
def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    '''simple docstring'''
    # Flattening and reduced latitudes on the WGS84 ellipsoid
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)
if __name__ == "__main__":
import doctest
doctest.testmod()
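    # Worked example (illustrative): distance between San Francisco
    # (37.774856, -122.424227) and Yosemite (37.864742, -119.537521);
    # the coordinates are sample values and the result is roughly 254 km.
    print(f"{haversine_distance(37.774856, -122.424227, 37.864742, -119.537521):0,.0f} meters")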
import argparse
import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def lowerCamelCase__ ( UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict ) -> List[Any]:
'''simple docstring'''
_snake_case = OmegaConf.load(UpperCamelCase__ )
_snake_case = torch.load(UpperCamelCase__ , map_location='cpu' )['model']
_snake_case = list(state_dict.keys() )
# extract state_dict for VQVAE
_snake_case = {}
_snake_case = 'first_stage_model.'
for key in keys:
if key.startswith(UpperCamelCase__ ):
_snake_case = state_dict[key]
# extract state_dict for UNetLDM
_snake_case = {}
_snake_case = 'model.diffusion_model.'
for key in keys:
if key.startswith(UpperCamelCase__ ):
_snake_case = state_dict[key]
_snake_case = config.model.params.first_stage_config.params
_snake_case = config.model.params.unet_config.params
_snake_case = VQModel(**UpperCamelCase__ ).eval()
vqvae.load_state_dict(UpperCamelCase__ )
_snake_case = UNetLDMModel(**UpperCamelCase__ ).eval()
unet.load_state_dict(UpperCamelCase__ )
_snake_case = DDIMScheduler(
timesteps=config.model.params.timesteps , beta_schedule='scaled_linear' , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=UpperCamelCase__ , )
_snake_case = LDMPipeline(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
pipeline.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", type=str, required=True)
parser.add_argument("""--config_path""", type=str, required=True)
parser.add_argument("""--output_path""", type=str, required=True)
UpperCAmelCase_ = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
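# Illustrative invocation of this conversion script from the shell; the script
# filename and file paths are placeholders, not values taken from this document.
#
#   python convert_ldm_original.py \
#       --checkpoint_path /path/to/ldm_model.ckpt \
#       --config_path /path/to/ldm_config.yaml \
#       --output_path ./converted_ldm_pipeline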
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class UpperCamelCase_ ( _lowerCamelCase ):
def __init__( self , *lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ ) -> Tuple:
super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ )
_snake_case = eval_examples
_snake_case = post_process_function
def lowerCAmelCase ( self , lowerCAmelCase_ = None , lowerCAmelCase_=None , lowerCAmelCase_ = None , lowerCAmelCase_ = "eval" , **lowerCAmelCase_ , ) -> Dict[str, float]:
_snake_case = gen_kwargs.copy()
_snake_case = (
gen_kwargs['max_length'] if gen_kwargs.get('max_length' ) is not None else self.args.generation_max_length
)
_snake_case = (
gen_kwargs['num_beams'] if gen_kwargs.get('num_beams' ) is not None else self.args.generation_num_beams
)
_snake_case = gen_kwargs
_snake_case = self.eval_dataset if eval_dataset is None else eval_dataset
_snake_case = self.get_eval_dataloader(lowerCAmelCase_ )
_snake_case = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
_snake_case = self.compute_metrics
_snake_case = None
_snake_case = time.time()
_snake_case = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
_snake_case = eval_loop(
lowerCAmelCase_ , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowerCAmelCase_ , metric_key_prefix=lowerCAmelCase_ , )
finally:
_snake_case = compute_metrics
_snake_case = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
lowerCAmelCase_ , lowerCAmelCase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
_snake_case = self.post_process_function(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = self.compute_metrics(lowerCAmelCase_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
_snake_case = metrics.pop(lowerCAmelCase_ )
metrics.update(output.metrics )
else:
_snake_case = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(lowerCAmelCase_ )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
_snake_case = self.callback_handler.on_evaluate(self.args , self.state , self.control , lowerCAmelCase_ )
return metrics
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_ = "test" , **lowerCAmelCase_ ) -> str:
_snake_case = gen_kwargs.copy()
_snake_case = self.get_test_dataloader(lowerCAmelCase_ )
# Temporarily disable metric computation, we will do it in the loop here.
_snake_case = self.compute_metrics
_snake_case = None
_snake_case = time.time()
_snake_case = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
_snake_case = eval_loop(
lowerCAmelCase_ , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowerCAmelCase_ , metric_key_prefix=lowerCAmelCase_ , )
finally:
_snake_case = compute_metrics
_snake_case = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
lowerCAmelCase_ , lowerCAmelCase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
_snake_case = self.post_process_function(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , 'predict' )
_snake_case = self.compute_metrics(lowerCAmelCase_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
_snake_case = metrics.pop(lowerCAmelCase_ )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=lowerCAmelCase_ )
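# Illustrative usage sketch: the evaluate/predict overrides above forward
# generation arguments through **gen_kwargs, so beam-search settings can be
# passed directly. The trainer construction and dataset names are assumptions.
#
# metrics = trainer.evaluate(max_length=128, num_beams=4, metric_key_prefix="eval")
# predictions = trainer.predict(test_dataset, test_examples, max_length=128, num_beams=4)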
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UpperCamelCase_ :
@property
def lowerCAmelCase ( self ) -> int:
return self.get_dummy_input()
@property
def lowerCAmelCase ( self ) -> Optional[Any]:
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(F'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' )
def lowerCAmelCase ( self , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=False , ) -> List[str]:
_snake_case = 4
_snake_case = 32
_snake_case = (32, 32)
_snake_case = torch.manual_seed(0 )
_snake_case = torch.device(lowerCAmelCase_ )
_snake_case = (batch_size, num_channels) + sizes
_snake_case = randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=lowerCAmelCase_ )
_snake_case = {'hidden_states': hidden_states}
if include_temb:
_snake_case = 128
_snake_case = randn_tensor((batch_size, temb_channels) , generator=lowerCAmelCase_ , device=lowerCAmelCase_ )
if include_res_hidden_states_tuple:
_snake_case = torch.manual_seed(1 )
_snake_case = (randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=lowerCAmelCase_ ),)
if include_encoder_hidden_states:
_snake_case = floats_tensor((batch_size, 32, 32) ).to(lowerCAmelCase_ )
if include_skip_sample:
_snake_case = randn_tensor(((batch_size, 3) + sizes) , generator=lowerCAmelCase_ , device=lowerCAmelCase_ )
return dummy_input
def lowerCAmelCase ( self ) -> Tuple:
_snake_case = {
'in_channels': 32,
'out_channels': 32,
'temb_channels': 128,
}
if self.block_type == "up":
_snake_case = 32
if self.block_type == "mid":
init_dict.pop('out_channels' )
_snake_case = self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Optional[int]:
_snake_case , _snake_case = self.prepare_init_args_and_inputs_for_common()
_snake_case = self.block_class(**lowerCAmelCase_ )
unet_block.to(lowerCAmelCase_ )
unet_block.eval()
with torch.no_grad():
_snake_case = unet_block(**lowerCAmelCase_ )
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case = output[0]
self.assertEqual(output.shape , self.output_shape )
_snake_case = output[0, -1, -3:, -3:]
_snake_case = torch.tensor(lowerCAmelCase_ ).to(lowerCAmelCase_ )
assert torch_all_close(output_slice.flatten() , lowerCAmelCase_ , atol=5E-3 )
@unittest.skipIf(torch_device == 'mps' , 'Training is not supported in mps' )
def lowerCAmelCase ( self ) -> Tuple:
_snake_case , _snake_case = self.prepare_init_args_and_inputs_for_common()
_snake_case = self.block_class(**lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.train()
_snake_case = model(**lowerCAmelCase_ )
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case = output[0]
_snake_case = torch.device(lowerCAmelCase_ )
_snake_case = randn_tensor(output.shape , device=lowerCAmelCase_ )
_snake_case = torch.nn.functional.mse_loss(lowerCAmelCase_ , lowerCAmelCase_ )
loss.backward()
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
UpperCAmelCase_ = """CompVis/stable-diffusion-v1-1"""
UpperCAmelCase_ = """CompVis/stable-diffusion-v1-2"""
UpperCAmelCase_ = """CompVis/stable-diffusion-v1-3"""
UpperCAmelCase_ = """CompVis/stable-diffusion-v1-4"""
class UpperCamelCase_ ( _lowerCamelCase ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = True , ) -> Optional[int]:
        super().__init__()
_snake_case = StableDiffusionPipeline.from_pretrained(lowerCAmelCase_ )
_snake_case = StableDiffusionPipeline.from_pretrained(lowerCAmelCase_ )
_snake_case = StableDiffusionPipeline.from_pretrained(lowerCAmelCase_ )
_snake_case = StableDiffusionPipeline(
vae=lowerCAmelCase_ , text_encoder=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ , unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ , safety_checker=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , requires_safety_checker=lowerCAmelCase_ , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def lowerCAmelCase ( self ) -> Dict[str, Any]:
return {k: getattr(self , lowerCAmelCase_ ) for k in self.config.keys() if not k.startswith('_' )}
def lowerCAmelCase ( self , lowerCAmelCase_ = "auto" ) -> Any:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_snake_case = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> List[str]:
self.enable_attention_slicing(lowerCAmelCase_ )
@torch.no_grad()
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = 512 , lowerCAmelCase_ = 512 , lowerCAmelCase_ = 50 , lowerCAmelCase_ = 7.5 , lowerCAmelCase_ = None , lowerCAmelCase_ = 1 , lowerCAmelCase_ = 0.0 , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = "pil" , lowerCAmelCase_ = True , lowerCAmelCase_ = None , lowerCAmelCase_ = 1 , **lowerCAmelCase_ , ) -> str:
return self.pipea(
prompt=lowerCAmelCase_ , height=lowerCAmelCase_ , width=lowerCAmelCase_ , num_inference_steps=lowerCAmelCase_ , guidance_scale=lowerCAmelCase_ , negative_prompt=lowerCAmelCase_ , num_images_per_prompt=lowerCAmelCase_ , eta=lowerCAmelCase_ , generator=lowerCAmelCase_ , latents=lowerCAmelCase_ , output_type=lowerCAmelCase_ , return_dict=lowerCAmelCase_ , callback=lowerCAmelCase_ , callback_steps=lowerCAmelCase_ , **lowerCAmelCase_ , )
@torch.no_grad()
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = 512 , lowerCAmelCase_ = 512 , lowerCAmelCase_ = 50 , lowerCAmelCase_ = 7.5 , lowerCAmelCase_ = None , lowerCAmelCase_ = 1 , lowerCAmelCase_ = 0.0 , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = "pil" , lowerCAmelCase_ = True , lowerCAmelCase_ = None , lowerCAmelCase_ = 1 , **lowerCAmelCase_ , ) -> Optional[int]:
return self.pipea(
prompt=lowerCAmelCase_ , height=lowerCAmelCase_ , width=lowerCAmelCase_ , num_inference_steps=lowerCAmelCase_ , guidance_scale=lowerCAmelCase_ , negative_prompt=lowerCAmelCase_ , num_images_per_prompt=lowerCAmelCase_ , eta=lowerCAmelCase_ , generator=lowerCAmelCase_ , latents=lowerCAmelCase_ , output_type=lowerCAmelCase_ , return_dict=lowerCAmelCase_ , callback=lowerCAmelCase_ , callback_steps=lowerCAmelCase_ , **lowerCAmelCase_ , )
@torch.no_grad()
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = 512 , lowerCAmelCase_ = 512 , lowerCAmelCase_ = 50 , lowerCAmelCase_ = 7.5 , lowerCAmelCase_ = None , lowerCAmelCase_ = 1 , lowerCAmelCase_ = 0.0 , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = "pil" , lowerCAmelCase_ = True , lowerCAmelCase_ = None , lowerCAmelCase_ = 1 , **lowerCAmelCase_ , ) -> List[str]:
return self.pipea(
prompt=lowerCAmelCase_ , height=lowerCAmelCase_ , width=lowerCAmelCase_ , num_inference_steps=lowerCAmelCase_ , guidance_scale=lowerCAmelCase_ , negative_prompt=lowerCAmelCase_ , num_images_per_prompt=lowerCAmelCase_ , eta=lowerCAmelCase_ , generator=lowerCAmelCase_ , latents=lowerCAmelCase_ , output_type=lowerCAmelCase_ , return_dict=lowerCAmelCase_ , callback=lowerCAmelCase_ , callback_steps=lowerCAmelCase_ , **lowerCAmelCase_ , )
@torch.no_grad()
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = 512 , lowerCAmelCase_ = 512 , lowerCAmelCase_ = 50 , lowerCAmelCase_ = 7.5 , lowerCAmelCase_ = None , lowerCAmelCase_ = 1 , lowerCAmelCase_ = 0.0 , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = "pil" , lowerCAmelCase_ = True , lowerCAmelCase_ = None , lowerCAmelCase_ = 1 , **lowerCAmelCase_ , ) -> Dict:
return self.pipea(
prompt=lowerCAmelCase_ , height=lowerCAmelCase_ , width=lowerCAmelCase_ , num_inference_steps=lowerCAmelCase_ , guidance_scale=lowerCAmelCase_ , negative_prompt=lowerCAmelCase_ , num_images_per_prompt=lowerCAmelCase_ , eta=lowerCAmelCase_ , generator=lowerCAmelCase_ , latents=lowerCAmelCase_ , output_type=lowerCAmelCase_ , return_dict=lowerCAmelCase_ , callback=lowerCAmelCase_ , callback_steps=lowerCAmelCase_ , **lowerCAmelCase_ , )
@torch.no_grad()
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = 512 , lowerCAmelCase_ = 512 , lowerCAmelCase_ = 50 , lowerCAmelCase_ = 7.5 , lowerCAmelCase_ = None , lowerCAmelCase_ = 1 , lowerCAmelCase_ = 0.0 , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = "pil" , lowerCAmelCase_ = True , lowerCAmelCase_ = None , lowerCAmelCase_ = 1 , **lowerCAmelCase_ , ) -> Any:
_snake_case = 'cuda' if torch.cuda.is_available() else 'cpu'
self.to(lowerCAmelCase_ )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'''`height` and `width` must be divisible by 8 but are {height} and {width}.''' )
# Get first result from Stable Diffusion Checkpoint v1.1
_snake_case = self.textaimg_sda_a(
prompt=lowerCAmelCase_ , height=lowerCAmelCase_ , width=lowerCAmelCase_ , num_inference_steps=lowerCAmelCase_ , guidance_scale=lowerCAmelCase_ , negative_prompt=lowerCAmelCase_ , num_images_per_prompt=lowerCAmelCase_ , eta=lowerCAmelCase_ , generator=lowerCAmelCase_ , latents=lowerCAmelCase_ , output_type=lowerCAmelCase_ , return_dict=lowerCAmelCase_ , callback=lowerCAmelCase_ , callback_steps=lowerCAmelCase_ , **lowerCAmelCase_ , )
# Get first result from Stable Diffusion Checkpoint v1.2
_snake_case = self.textaimg_sda_a(
prompt=lowerCAmelCase_ , height=lowerCAmelCase_ , width=lowerCAmelCase_ , num_inference_steps=lowerCAmelCase_ , guidance_scale=lowerCAmelCase_ , negative_prompt=lowerCAmelCase_ , num_images_per_prompt=lowerCAmelCase_ , eta=lowerCAmelCase_ , generator=lowerCAmelCase_ , latents=lowerCAmelCase_ , output_type=lowerCAmelCase_ , return_dict=lowerCAmelCase_ , callback=lowerCAmelCase_ , callback_steps=lowerCAmelCase_ , **lowerCAmelCase_ , )
# Get first result from Stable Diffusion Checkpoint v1.3
_snake_case = self.textaimg_sda_a(
prompt=lowerCAmelCase_ , height=lowerCAmelCase_ , width=lowerCAmelCase_ , num_inference_steps=lowerCAmelCase_ , guidance_scale=lowerCAmelCase_ , negative_prompt=lowerCAmelCase_ , num_images_per_prompt=lowerCAmelCase_ , eta=lowerCAmelCase_ , generator=lowerCAmelCase_ , latents=lowerCAmelCase_ , output_type=lowerCAmelCase_ , return_dict=lowerCAmelCase_ , callback=lowerCAmelCase_ , callback_steps=lowerCAmelCase_ , **lowerCAmelCase_ , )
# Get first result from Stable Diffusion Checkpoint v1.4
_snake_case = self.textaimg_sda_a(
prompt=lowerCAmelCase_ , height=lowerCAmelCase_ , width=lowerCAmelCase_ , num_inference_steps=lowerCAmelCase_ , guidance_scale=lowerCAmelCase_ , negative_prompt=lowerCAmelCase_ , num_images_per_prompt=lowerCAmelCase_ , eta=lowerCAmelCase_ , generator=lowerCAmelCase_ , latents=lowerCAmelCase_ , output_type=lowerCAmelCase_ , return_dict=lowerCAmelCase_ , callback=lowerCAmelCase_ , callback_steps=lowerCAmelCase_ , **lowerCAmelCase_ , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
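# Illustrative usage sketch: the final method above runs one prompt through the
# v1.1-v1.4 checkpoints and returns the first image from each. How this
# community pipeline is instantiated is an assumption here.
#
# images = pipeline("a photo of an astronaut riding a horse", num_inference_steps=50).images
# for i, image in enumerate(images):
#     image.save(f"stable_diffusion_v1_{i + 1}.png")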
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def lowerCamelCase__ ( ) -> List[str]:
'''simple docstring'''
_snake_case , _snake_case = 9, 14 # noqa: F841
_snake_case = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
_snake_case = defaultdict(UpperCamelCase__ )
for nodea, nodea, cost in edges:
adjancency[nodea].append([nodea, cost] )
adjancency[nodea].append([nodea, cost] )
_snake_case = mst(UpperCamelCase__ )
_snake_case = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
_snake_case = tuple(answer[:2] )
_snake_case = tuple(edge[::-1] )
assert edge in result or reverse in result
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    '''simple docstring'''
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
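    # Worked example (illustrative): 45 = 3 * 3 * 5, so this prints [3, 3, 5].
    print(prime_factors(45))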
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    '''simple docstring'''
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float('-inf')
    curr_sum = 0.0
    for num in arr:
        # Extend the current run or restart it, per Kadane's algorithm
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(F"{max_subarray_sum(nums) = }")
def twos_complement(number: int) -> str:
    '''simple docstring'''
    if number > 0:
        raise ValueError('input must be a negative integer')
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            '1'
            + '0' * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else '0'
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
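    # Worked example (illustrative): the minimal-width two's complement of -5
    # is one sign bit plus the three bits of bin(5), so this prints 0b1011.
    print(twos_complement(-5))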
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class UpperCamelCase_ ( enum.Enum ):
lowerCAmelCase_ = 0
lowerCAmelCase_ = 1
lowerCAmelCase_ = 2
@add_end_docstrings(_lowerCamelCase )
class UpperCamelCase_ ( _lowerCamelCase ):
lowerCAmelCase_ = '''
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
'''
def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> Any:
super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == 'tf' else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
_snake_case = None
if self.model.config.prefix is not None:
_snake_case = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
_snake_case = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
_snake_case , _snake_case , _snake_case = self._sanitize_parameters(prefix=lowerCAmelCase_ , **self._forward_params )
_snake_case = {**self._preprocess_params, **preprocess_params}
_snake_case = {**self._forward_params, **forward_params}
def lowerCAmelCase ( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ , ) -> Tuple:
_snake_case = {}
if prefix is not None:
_snake_case = prefix
if prefix:
_snake_case = self.tokenizer(
lowerCAmelCase_ , padding=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_tensors=self.framework )
_snake_case = prefix_inputs['input_ids'].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
                    F'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter; expected'''
                    ' one of [None, \'hole\']' )
_snake_case = handle_long_generation
preprocess_params.update(lowerCAmelCase_ )
_snake_case = generate_kwargs
_snake_case = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_full_text`' )
if return_tensors is not None:
raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`' )
_snake_case = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_tensors`' )
_snake_case = ReturnType.TENSORS
if return_type is not None:
_snake_case = return_type
if clean_up_tokenization_spaces is not None:
_snake_case = clean_up_tokenization_spaces
if stop_sequence is not None:
_snake_case = self.tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
if len(lowerCAmelCase_ ) > 1:
warnings.warn(
'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
' the stop sequence will be used as the stop sequence string in the interim.' )
_snake_case = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def lowerCAmelCase ( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> List[str]:
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({'add_space_before_punct_symbol': True} )
return super()._parse_and_tokenize(*lowerCAmelCase_ , **lowerCAmelCase_ )
def __call__( self , lowerCAmelCase_ , **lowerCAmelCase_ ) -> List[Any]:
return super().__call__(lowerCAmelCase_ , **lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_="" , lowerCAmelCase_=None , **lowerCAmelCase_ ) -> Any:
_snake_case = self.tokenizer(
prefix + prompt_text , padding=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_tensors=self.framework )
_snake_case = prompt_text
if handle_long_generation == "hole":
_snake_case = inputs['input_ids'].shape[-1]
if "max_new_tokens" in generate_kwargs:
_snake_case = generate_kwargs['max_new_tokens']
else:
_snake_case = generate_kwargs.get('max_length' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('We cannot infer how many new tokens are expected' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
_snake_case = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
                        'We cannot use `hole` to handle this generation: the number of desired tokens exceeds the'
                        ' model\'s max length' )
_snake_case = inputs['input_ids'][:, -keep_length:]
if "attention_mask" in inputs:
_snake_case = inputs['attention_mask'][:, -keep_length:]
return inputs
def lowerCAmelCase ( self , lowerCAmelCase_ , **lowerCAmelCase_ ) -> Optional[Any]:
_snake_case = model_inputs['input_ids']
_snake_case = model_inputs.get('attention_mask' , lowerCAmelCase_ )
# Allow empty prompts
if input_ids.shape[1] == 0:
_snake_case = None
_snake_case = None
_snake_case = 1
else:
_snake_case = input_ids.shape[0]
_snake_case = model_inputs.pop('prompt_text' )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
_snake_case = generate_kwargs.pop('prefix_length' , 0 )
if prefix_length > 0:
_snake_case = 'max_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].max_new_tokens is not None
)
if not has_max_new_tokens:
_snake_case = generate_kwargs.get('max_length' ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
_snake_case = 'min_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
_snake_case = self.model.generate(input_ids=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , **lowerCAmelCase_ )
_snake_case = generated_sequence.shape[0]
if self.framework == "pt":
_snake_case = generated_sequence.reshape(lowerCAmelCase_ , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
_snake_case = tf.reshape(lowerCAmelCase_ , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=ReturnType.FULL_TEXT , lowerCAmelCase_=True ) -> int:
_snake_case = model_outputs['generated_sequence'][0]
_snake_case = model_outputs['input_ids']
_snake_case = model_outputs['prompt_text']
_snake_case = generated_sequence.numpy().tolist()
_snake_case = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
_snake_case = {'generated_token_ids': sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
_snake_case = self.tokenizer.decode(
lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
_snake_case = 0
else:
_snake_case = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ , ) )
if return_type == ReturnType.FULL_TEXT:
_snake_case = prompt_text + text[prompt_length:]
else:
_snake_case = text[prompt_length:]
_snake_case = {'generated_text': all_text}
records.append(lowerCAmelCase_ )
return records
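# Illustrative usage sketch: the `handle_long_generation="hole"` option
# implemented above keeps only the trailing tokens of an over-long prompt so
# generation still fits the model's context window. The model name is an
# assumption.
#
# from transformers import pipeline
#
# generator = pipeline("text-generation", model="gpt2")
# print(generator("Once upon a time", max_new_tokens=20, handle_long_generation="hole"))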
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
UpperCAmelCase_ = random.Random()
def lowerCamelCase__ ( UpperCamelCase__ : str , UpperCamelCase__ : Optional[int]=1.0 , UpperCamelCase__ : Dict=None , UpperCamelCase__ : Optional[Any]=None ) -> Any:
'''simple docstring'''
if rng is None:
_snake_case = global_rng
_snake_case = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class UpperCamelCase_ ( unittest.TestCase ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=7 , lowerCAmelCase_=400 , lowerCAmelCase_=2000 , lowerCAmelCase_=24 , lowerCAmelCase_=24 , lowerCAmelCase_=0.0 , lowerCAmelCase_=1_6000 , lowerCAmelCase_=True , lowerCAmelCase_=True , ) -> Optional[Any]:
_snake_case = parent
_snake_case = batch_size
_snake_case = min_seq_length
_snake_case = max_seq_length
_snake_case = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_snake_case = feature_size
_snake_case = num_mel_bins
_snake_case = padding_value
_snake_case = sampling_rate
_snake_case = return_attention_mask
_snake_case = do_normalize
def lowerCAmelCase ( self ) -> Dict:
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def lowerCAmelCase ( self , lowerCAmelCase_=False , lowerCAmelCase_=False ) -> Optional[Any]:
def _flatten(lowerCAmelCase_ ):
return list(itertools.chain(*lowerCAmelCase_ ) )
if equal_length:
_snake_case = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_snake_case = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_snake_case = [np.asarray(lowerCAmelCase_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class UpperCamelCase_ ( _lowerCamelCase , unittest.TestCase ):
lowerCAmelCase_ = SpeechaTextFeatureExtractor if is_speech_available() else None
def lowerCAmelCase ( self ) -> Optional[int]:
_snake_case = SpeechaTextFeatureExtractionTester(self )
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Union[str, Any]:
self.assertTrue(np.all(np.mean(lowerCAmelCase_ , axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(lowerCAmelCase_ , axis=0 ) - 1 ) < 1E-3 ) )
def lowerCAmelCase ( self ) -> Optional[Any]:
        # Tests that all calls wrap to encode_plus and batch_encode_plus
_snake_case = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_snake_case = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_snake_case = [np.asarray(lowerCAmelCase_ ) for speech_input in speech_inputs]
# Test feature size
_snake_case = feature_extractor(lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors='np' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
_snake_case = feature_extractor(speech_inputs[0] , return_tensors='np' ).input_features
_snake_case = feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_features
self.assertTrue(np.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1E-3 ) )
# Test batched
_snake_case = feature_extractor(lowerCAmelCase_ , return_tensors='np' ).input_features
_snake_case = feature_extractor(lowerCAmelCase_ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
self.assertTrue(np.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
_snake_case = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_snake_case = np.asarray(lowerCAmelCase_ )
_snake_case = feature_extractor(lowerCAmelCase_ , return_tensors='np' ).input_features
_snake_case = feature_extractor(lowerCAmelCase_ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
self.assertTrue(np.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1E-3 ) )
def lowerCAmelCase ( self ) -> Dict:
_snake_case = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_snake_case = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_snake_case = ['longest', 'max_length', 'do_not_pad']
_snake_case = [None, 16, None]
for max_length, padding in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case = feature_extractor(
lowerCAmelCase_ , padding=lowerCAmelCase_ , max_length=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ )
_snake_case = inputs.input_features
_snake_case = inputs.attention_mask
_snake_case = [np.sum(lowerCAmelCase_ ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def lowerCAmelCase ( self ) -> List[Any]:
_snake_case = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_snake_case = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_snake_case = ['longest', 'max_length', 'do_not_pad']
_snake_case = [None, 16, None]
for max_length, padding in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case = feature_extractor(
lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors='np' , return_attention_mask=lowerCAmelCase_ )
_snake_case = inputs.input_features
_snake_case = inputs.attention_mask
_snake_case = [np.sum(lowerCAmelCase_ ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def lowerCAmelCase ( self ) -> Optional[int]:
_snake_case = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_snake_case = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_snake_case = feature_extractor(
lowerCAmelCase_ , padding='max_length' , max_length=4 , truncation=lowerCAmelCase_ , return_tensors='np' , return_attention_mask=lowerCAmelCase_ , )
_snake_case = inputs.input_features
_snake_case = inputs.attention_mask
_snake_case = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
def lowerCAmelCase ( self ) -> str:
_snake_case = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_snake_case = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_snake_case = feature_extractor(
lowerCAmelCase_ , padding='longest' , max_length=4 , truncation=lowerCAmelCase_ , return_tensors='np' , return_attention_mask=lowerCAmelCase_ , )
_snake_case = inputs.input_features
_snake_case = inputs.attention_mask
_snake_case = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
        # make sure that if max_length < longest and truncation is enabled -> truncate to max_length
self.assertEqual(input_features.shape , (3, 4, 24) )
_snake_case = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_snake_case = feature_extractor(
lowerCAmelCase_ , padding='longest' , max_length=16 , truncation=lowerCAmelCase_ , return_tensors='np' , return_attention_mask=lowerCAmelCase_ , )
_snake_case = inputs.input_features
_snake_case = inputs.attention_mask
_snake_case = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
        # make sure that if max_length > longest -> then pad to longest, not max_length
self.assertEqual(input_features.shape , (3, 6, 24) )
def lowerCAmelCase ( self ) -> Any:
import torch
_snake_case = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_snake_case = np.random.rand(100 , 32 ).astype(np.floataa )
_snake_case = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_snake_case = feature_extractor.pad([{'input_features': inputs}] , return_tensors='np' )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
_snake_case = feature_extractor.pad([{'input_features': inputs}] , return_tensors='pt' )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> str:
from datasets import load_dataset
_snake_case = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
# automatic decoding with librispeech
_snake_case = ds.sort('id' ).select(range(lowerCAmelCase_ ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def lowerCAmelCase ( self ) -> Any:
# fmt: off
_snake_case = np.array([
-1.57_45, -1.77_13, -1.70_20, -1.60_69, -1.22_50, -1.11_05, -0.90_72, -0.82_41,
-1.23_10, -0.80_98, -0.33_20, -0.41_01, -0.79_85, -0.49_96, -0.82_13, -0.91_28,
-1.04_20, -1.12_86, -1.04_40, -0.79_99, -0.84_05, -1.22_75, -1.54_43, -1.46_25,
] )
# fmt: on
_snake_case = self._load_datasamples(1 )
_snake_case = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_snake_case = feature_extractor(lowerCAmelCase_ , return_tensors='pt' ).input_features
        self.assertEqual(input_features.shape , (1, 584, 24) )
self.assertTrue(np.allclose(input_features[0, 0, :30] , lowerCAmelCase_ , atol=1E-4 ) )
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
UpperCAmelCase_ = """\
@misc{wu2016googles,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
UpperCAmelCase_ = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
UpperCAmelCase_ = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
'google_bleu': google_bleu score
Examples:
Example 1:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class GoogleBleu( datasets.Metric ):
    def _info( self ) -> MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
} ) , )
    def _compute( self , predictions , references , min_len = 1 , max_len = 4 , ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references , hypotheses=predictions , min_len=min_len , max_len=max_len )
        }
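# The sketch below is illustrative only and not part of the metric: it computes a
# sentence-level GLEU for one hypothesis/reference pair exactly as described in the
# docstring above (clipped n-gram matches, then the minimum of precision and recall).
# The helper name is hypothetical.
def _sentence_gleu_sketch(hypothesis, reference, min_len=1, max_len=4):
    from collections import Counter

    def ngrams(tokens, n):
        return Counter(tuple(tokens[i : i + n]) for i in range(len(tokens) - n + 1))

    matches = hyp_total = ref_total = 0
    for n in range(min_len, max_len + 1):
        hyp_counts = ngrams(hypothesis, n)
        ref_counts = ngrams(reference, n)
        matches += sum((hyp_counts & ref_counts).values())  # clipped n-gram overlap
        hyp_total += sum(hyp_counts.values())
        ref_total += sum(ref_counts.values())
    precision = matches / hyp_total if hyp_total else 0.0
    recall = matches / ref_total if ref_total else 0.0
    return min(precision, recall)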
| 295
| 1
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"""BAAI/AltCLIP""": """https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json""",
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class AltCLIPTextConfig( PretrainedConfig ):
lowerCAmelCase_ = '''altclip_text_model'''
    def __init__( self , vocab_size=25_0002 , hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , intermediate_size=4096 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=514 , type_vocab_size=1 , initializer_range=0.02 , initializer_factor=0.02 , layer_norm_eps=1E-05 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , project_dim=768 , **kwargs , ) -> None:
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.project_dim = project_dim
class AltCLIPVisionConfig( PretrainedConfig ):
lowerCAmelCase_ = '''altclip_vision_model'''
    def __init__( self , hidden_size=768 , intermediate_size=3072 , projection_dim=512 , num_hidden_layers=12 , num_attention_heads=12 , num_channels=3 , image_size=224 , patch_size=32 , hidden_act="quick_gelu" , layer_norm_eps=1E-5 , attention_dropout=0.0 , initializer_range=0.02 , initializer_factor=1.0 , **kwargs , ) -> None:
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get('model_type' ) == "altclip":
            config_dict = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class AltCLIPConfig( PretrainedConfig ):
lowerCAmelCase_ = '''altclip'''
lowerCAmelCase_ = True
    def __init__( self , text_config=None , vision_config=None , projection_dim=768 , logit_scale_init_value=2.65_92 , **kwargs ) -> None:
        # If `_config_dict` exist, we use them for the backward compatibility.
        # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
        # of confusion!).
        text_config_dict = kwargs.pop('text_config_dict' , None )
        vision_config_dict = kwargs.pop('vision_config_dict' , None )
        super().__init__(**kwargs )
        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}
            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict ).to_dict()
            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            F'''`{key}` is found in both `text_config_dict` and `text_config` but with different values. '''
                            F'''The value `text_config_dict["{key}"]` will be used instead.'''
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            F'''`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The '''
                            F'''value `text_config["{key}"]` will be overriden.'''
                        )
                    logger.warning(message )
            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict )
        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}
            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict ).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict['id2label'] = {
                    str(key ): value for key, value in _vision_config_dict['id2label'].items()
                }
            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            F'''`{key}` is found in both `vision_config_dict` and `vision_config` but with different '''
                            F'''values. The value `vision_config_dict["{key}"]` will be used instead.'''
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            F'''`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. '''
                            F'''The value `vision_config["{key}"]` will be overriden.'''
                        )
                    logger.warning(message )
            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict )
        if text_config is None:
            text_config = {}
            logger.info('`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.' )
        if vision_config is None:
            vision_config = {}
            logger.info('`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.' )
        self.text_config = AltCLIPTextConfig(**text_config )
        self.vision_config = AltCLIPVisionConfig(**vision_config )
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0
    @classmethod
    def from_text_vision_configs( cls , text_config , vision_config , **kwargs ):
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **kwargs )
    def to_dict( self ) -> dict:
        output = copy.deepcopy(self.__dict__ )
        output['text_config'] = self.text_config.to_dict()
        output['vision_config'] = self.vision_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
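# Illustrative usage sketch (values are placeholders; mirrors the composite config
# API defined above):
#   text_config = AltCLIPTextConfig(hidden_size=1024)
#   vision_config = AltCLIPVisionConfig(image_size=224)
#   config = AltCLIPConfig.from_text_vision_configs(text_config, vision_config)
#   config.to_dict()  # serializes the nested text/vision configs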
| 295
|
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""",
},
"""merges_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/bart-base""": 1024,
"""facebook/bart-large""": 1024,
"""facebook/bart-large-mnli""": 1024,
"""facebook/bart-large-cnn""": 1024,
"""facebook/bart-large-xsum""": 1024,
"""yjernite/bart_eli5""": 1024,
}
@lru_cache()
def bytes_to_unicode( ) -> dict:
    '''Maps every utf-8 byte to a printable unicode character, for byte-level BPE.'''
    bs = (
        list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
def get_pairs( word ) -> set:
    '''Returns the set of adjacent symbol pairs in a word (a tuple of symbols).'''
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
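# Quick illustration of the helper above:
#   get_pairs(('l', 'o', 'w')) == {('l', 'o'), ('o', 'w')}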
class BartTokenizer( PreTrainedTokenizer ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ['''input_ids''', '''attention_mask''']
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="replace" , lowerCAmelCase_="<s>" , lowerCAmelCase_="</s>" , lowerCAmelCase_="</s>" , lowerCAmelCase_="<s>" , lowerCAmelCase_="<unk>" , lowerCAmelCase_="<pad>" , lowerCAmelCase_="<mask>" , lowerCAmelCase_=False , **lowerCAmelCase_ , ) -> Tuple:
_snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else bos_token
_snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else eos_token
_snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else sep_token
_snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else cls_token
_snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else unk_token
_snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
_snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else mask_token
super().__init__(
errors=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ , **lowerCAmelCase_ , )
with open(lowerCAmelCase_ , encoding='utf-8' ) as vocab_handle:
_snake_case = json.load(lowerCAmelCase_ )
_snake_case = {v: k for k, v in self.encoder.items()}
_snake_case = errors # how to handle errors in decoding
_snake_case = bytes_to_unicode()
_snake_case = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCAmelCase_ , encoding='utf-8' ) as merges_handle:
_snake_case = merges_handle.read().split('\n' )[1:-1]
_snake_case = [tuple(merge.split() ) for merge in bpe_merges]
_snake_case = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
_snake_case = {}
_snake_case = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
_snake_case = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
def lowerCAmelCase ( self ) -> Any:
return len(self.encoder )
def lowerCAmelCase ( self ) -> Dict:
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Dict:
if token in self.cache:
return self.cache[token]
_snake_case = tuple(lowerCAmelCase_ )
_snake_case = get_pairs(lowerCAmelCase_ )
if not pairs:
return token
while True:
_snake_case = min(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : self.bpe_ranks.get(lowerCAmelCase_ , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
_snake_case , _snake_case = bigram
_snake_case = []
_snake_case = 0
while i < len(lowerCAmelCase_ ):
try:
_snake_case = word.index(lowerCAmelCase_ , lowerCAmelCase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_snake_case = j
if word[i] == first and i < len(lowerCAmelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_snake_case = tuple(lowerCAmelCase_ )
_snake_case = new_word
if len(lowerCAmelCase_ ) == 1:
break
else:
_snake_case = get_pairs(lowerCAmelCase_ )
_snake_case = ' '.join(lowerCAmelCase_ )
_snake_case = word
return word
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> List[Any]:
_snake_case = []
for token in re.findall(self.pat , lowerCAmelCase_ ):
_snake_case = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCAmelCase_ ).split(' ' ) )
return bpe_tokens
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> str:
return self.encoder.get(lowerCAmelCase_ , self.encoder.get(self.unk_token ) )
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Optional[int]:
return self.decoder.get(lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Union[str, Any]:
_snake_case = ''.join(lowerCAmelCase_ )
_snake_case = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
return text
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> Tuple[str]:
if not os.path.isdir(lowerCAmelCase_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
_snake_case = os.path.join(
lowerCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
_snake_case = os.path.join(
lowerCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(lowerCAmelCase_ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase_ , ensure_ascii=lowerCAmelCase_ ) + '\n' )
_snake_case = 0
with open(lowerCAmelCase_ , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
' Please check that the tokenizer is not corrupted!' )
_snake_case = token_index
writer.write(' '.join(lowerCAmelCase_ ) + '\n' )
index += 1
return vocab_file, merge_file
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_snake_case = [self.cls_token_id]
_snake_case = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase_ , token_ids_a=lowerCAmelCase_ , already_has_special_tokens=lowerCAmelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase_ )) + [1]
return [1] + ([0] * len(lowerCAmelCase_ )) + [1, 1] + ([0] * len(lowerCAmelCase_ )) + [1]
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> List[int]:
_snake_case = [self.sep_token_id]
_snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=False , **lowerCAmelCase_ ) -> str:
_snake_case = kwargs.pop('add_prefix_space' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCAmelCase_ ) > 0 and not text[0].isspace()):
_snake_case = ' ' + text
return (text, kwargs)
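# Illustrative sketch, separate from the tokenizer above: one run of the greedy BPE
# merge loop on a toy word, assuming a tiny hand-written merge-rank table. It mirrors
# the tokenizer's `bpe` method (lowest-ranked adjacent pair merged first) but is not
# the tokenizer itself; the helper name is hypothetical.
def _bpe_sketch(word, bpe_ranks):
    symbols = tuple(word)
    while len(symbols) > 1:
        pairs = get_pairs(symbols)
        # pick the adjacent pair with the lowest merge rank
        bigram = min(pairs, key=lambda pair: bpe_ranks.get(pair, float('inf')))
        if bigram not in bpe_ranks:
            break
        first, second = bigram
        merged, i = [], 0
        while i < len(symbols):
            if i < len(symbols) - 1 and symbols[i] == first and symbols[i + 1] == second:
                merged.append(first + second)
                i += 2
            else:
                merged.append(symbols[i])
                i += 1
        symbols = tuple(merged)
    return symbols

# e.g. _bpe_sketch('lower', {('l', 'o'): 0, ('lo', 'w'): 1}) -> ('low', 'e', 'r')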
| 295
| 1
|
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel( pl.LightningModule ):
def __init__( self , lowerCAmelCase_ ) -> str:
super().__init__()
_snake_case = model
_snake_case = 2
_snake_case = nn.Linear(self.model.config.hidden_size , self.num_labels )
def lowerCAmelCase ( self ) -> Optional[Any]:
pass
def convert_longformer_qa_checkpoint_to_pytorch( longformer_model: str , longformer_question_answering_ckpt_path: str , pytorch_dump_folder_path: str ) -> None:
    '''simple docstring'''
    longformer = LongformerModel.from_pretrained(longformer_model )
    lightning_model = LightningModel(longformer )
    ckpt = torch.load(longformer_question_answering_ckpt_path , map_location=torch.device('cpu' ) )
    lightning_model.load_state_dict(ckpt['state_dict'] )
    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model )
    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
    longformer_for_qa.eval()
    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path )
    print(F'''Conversion successful. Model saved under {pytorch_dump_folder_path}''' )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--longformer_model""",
default=None,
type=str,
required=True,
help="""model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.""",
)
parser.add_argument(
"""--longformer_question_answering_ckpt_path""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch Lightning Checkpoint.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
UpperCAmelCase_ = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
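# Example invocation of the script above (the script filename and all paths are
# placeholders, not verified values):
#   python <this_script>.py \
#       --longformer_model longformer-base-4096 \
#       --longformer_question_answering_ckpt_path ./lightning.ckpt \
#       --pytorch_dump_folder_path ./longformer_qa_out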
| 295
|
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
UpperCAmelCase_ = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch( prophetnet_checkpoint_path: str , pytorch_dump_folder_path: str ) -> None:
    '''simple docstring'''
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path )
        prophet , loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path , output_loading_info=True )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path )
        prophet , loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path , output_loading_info=True )
    special_keys = ['key_proj', 'value_proj', 'query_proj']
    mapping = {
'self_attn': 'ngram_self_attn',
'cross_attn': 'encoder_attn',
'cross_attn_layer_norm': 'encoder_attn_layer_norm',
'feed_forward_layer_norm': 'final_layer_norm',
'feed_forward': '',
'intermediate': 'fc1',
'output': 'fc2',
'key_proj': 'k_proj',
'query_proj': 'q_proj',
'value_proj': 'v_proj',
'word_embeddings': 'embed_tokens',
'embeddings_layer_norm': 'emb_layer_norm',
'relative_pos_embeddings': 'relative_linear',
'ngram_embeddings': 'ngram_input_embed',
'position_embeddings': 'embed_positions',
}
for key in loading_info["missing_keys"]:
        attributes = key.split('.' )
        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model
        is_key_init = False
for attribute in attributes:
if attribute in mapping:
                old_attribute = mapping[attribute]
            if not hasattr(old_model , old_attribute ) and len(old_attribute ) > 0:
                old_attribute = attribute
            elif hasattr(old_model , attribute ):
                old_attribute = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
_snake_case = old_model.weight
logger.info(F'''{attribute} is initialized.''' )
_snake_case = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
_snake_case = old_model.bias
logger.info(F'''{attribute} is initialized''' )
_snake_case = True
break
            elif attribute in special_keys and hasattr(old_model , 'in_proj_weight' ):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model , attribute )
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
                is_key_init = True
                break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :] )
                is_key_init = True
break
            if attribute.isdigit():
                model = model[int(attribute )]
                old_model = old_model[int(old_attribute )]
            else:
                model = getattr(model , attribute )
                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model , old_attribute ):
                        raise ValueError(F'''{old_model} does not have {old_attribute}''' )
                    old_model = getattr(old_model , old_attribute )
if not is_key_init:
raise ValueError(F'''{key} was not correctly initialized!''' )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
    prophet.save_pretrained(pytorch_dump_folder_path )
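# Illustrative sketch, separate from the conversion above: an attention layer's fused
# `in_proj_weight` stacks the query, key and value projections along dim 0, so each
# projection is one `embed_dim`-sized third, exactly as sliced in the loop above.
# The helper name is hypothetical; any 2-D tensor or array works.
def _split_in_proj_sketch(in_proj_weight):
    embed_dim = in_proj_weight.shape[0] // 3
    query_w = in_proj_weight[:embed_dim, :]
    key_w = in_proj_weight[embed_dim : 2 * embed_dim, :]
    value_w = in_proj_weight[2 * embed_dim :, :]
    return query_w, key_w, value_w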
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
UpperCAmelCase_ = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 295
| 1
|
from typing import Any
class Node:
    def __init__( self , data: Any ) -> None:
        self.data = data
        self.next = None
def __repr__( self ) -> str:
return F'''Node({self.data})'''
class LinkedList:
    def __init__( self ) -> None:
        self.head = None
    def __iter__( self ) -> Any:
        node = self.head
        while node:
            yield node.data
            node = node.next
def __len__( self ) -> int:
return sum(1 for _ in self )
def __repr__( self ) -> str:
return "->".join([str(lowerCAmelCase_ ) for item in self] )
    def __getitem__( self , index: int ) -> Any:
        if not 0 <= index < len(self ):
            raise ValueError('list index out of range.' )
        for i, node in enumerate(self ):
            if i == index:
                return node
        return None
    def __setitem__( self , index: int , data: Any ) -> None:
        if not 0 <= index < len(self ):
            raise ValueError('list index out of range.' )
        current = self.head
        for _ in range(index ):
            current = current.next
        current.data = data
    def insert_tail( self , data: Any ) -> None:
        self.insert_nth(len(self ) , data )
    def insert_head( self , data: Any ) -> None:
        self.insert_nth(0 , data )
    def insert_nth( self , index: int , data: Any ) -> None:
        if not 0 <= index <= len(self ):
            raise IndexError('list index out of range' )
        new_node = Node(data )
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1 ):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
    def print_list( self ) -> None:  # print every node data
        print(self )
    def delete_head( self ) -> Any:
        return self.delete_nth(0 )
    def delete_tail( self ) -> Any:  # delete from tail
        return self.delete_nth(len(self ) - 1 )
    def delete_nth( self , index: int = 0 ) -> Any:
        if not 0 <= index <= len(self ) - 1:  # test if index is valid
            raise IndexError('List index out of range.' )
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1 ):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data
    def is_empty( self ) -> bool:
        return self.head is None
    def reverse( self ) -> None:
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
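# Worked trace of reverse() on the list 1->2->3, one line per loop iteration:
#   prev=None, current=1: set 1.next -> None, then prev=1, current=2
#   prev=1,    current=2: set 2.next -> 1,    then prev=2, current=3
#   prev=2,    current=3: set 3.next -> 2,    then prev=3, current=None; head becomes 3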
def test_singly_linked_list( ) -> None:
    '''simple docstring'''
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list ) == ""
    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    for i in range(10 ):
        assert len(linked_list ) == i
        linked_list.insert_nth(i , i + 1 )
    assert str(linked_list ) == "->".join(str(i ) for i in range(1 , 11 ) )
    linked_list.insert_head(0 )
    linked_list.insert_tail(11 )
    assert str(linked_list ) == "->".join(str(i ) for i in range(0 , 12 ) )
    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9 ) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list ) == 9
    assert str(linked_list ) == "->".join(str(i ) for i in range(1 , 10 ) )
    assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
    for i in range(0 , 9 ):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
    linked_list.reverse()
    assert str(linked_list ) == "->".join(str(i ) for i in range(-8 , 1 ) )
def test_singly_linked_list_2( ) -> None:
    '''simple docstring'''
    test_input = [
        -9,
        100,
        Node(77_345_112 ),
        'dlrow olleH',
        7,
        5_555,
        0,
        -192.5_5555,
        'Hello, world!',
        77.9,
        Node(10 ),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()
    for i in test_input:
        linked_list.insert_tail(i )
    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )
    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )
    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )
    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10 )
    assert result is None
    assert (
        str(linked_list ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )
    # Add a Node instance to its head
    linked_list.insert_head(Node('Hello again, world!' ) )
    assert (
        str(linked_list )
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )
    # Add None to its tail
    linked_list.insert_tail(None )
    assert (
        str(linked_list )
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )
    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list )
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main( ) -> None:
    '''simple docstring'''
    from doctest import testmod
    testmod()
    linked_list = LinkedList()
    linked_list.insert_head(input('Inserting 1st at head ' ).strip() )
    linked_list.insert_head(input('Inserting 2nd at head ' ).strip() )
    print('\nPrint list:' )
    linked_list.print_list()
    linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() )
    linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() )
    print('\nPrint list:' )
    linked_list.print_list()
    print('\nDelete head' )
    linked_list.delete_head()
    print('Delete tail' )
    linked_list.delete_tail()
    print('\nPrint list:' )
    linked_list.print_list()
    print('\nReverse linked list' )
    linked_list.reverse()
    print('\nPrint list:' )
    linked_list.print_list()
    print('\nString representation of linked list:' )
    print(linked_list )
    print('\nReading/changing Node data using indexing:' )
    print(F'''Element at Position 1: {linked_list[1]}''' )
    linked_list[1] = input('Enter New Value: ' ).strip()
    print('New list:' )
    print(linked_list )
    print(F'''length of linked_list is : {len(linked_list )}''' )
if __name__ == "__main__":
main()
| 295
|
import random
def random_graph( vertices_number: int , probability: float , directed: bool = False ) -> dict:
    '''Generate a random graph as an adjacency dict: vertex -> list of neighbours.'''
    graph = {i: [] for i in range(vertices_number )}
    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number )
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph
    # for each couple of nodes, add an edge from i to j
    # if the randomly generated number is lower than probability
    for i in range(vertices_number ):
        for j in range(i + 1 , vertices_number ):
            if random.random() < probability:
                graph[i].append(j )
                if not directed:
                    # if the graph is undirected, also add an edge from j to i
                    graph[j].append(i )
    return graph
def complete_graph( vertices_number: int ) -> dict:
    '''Generate a complete graph on vertices_number vertices.'''
    return {
        i: [j for j in range(vertices_number ) if i != j] for i in range(vertices_number )
    }
if __name__ == "__main__":
import doctest
doctest.testmod()
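# Illustrative usage (random_graph output depends on the global random seed;
# complete_graph is deterministic):
#   random.seed(1)
#   random_graph(4, 0.5)   # some adjacency dict over vertices 0..3
#   complete_graph(3)      # {0: [1, 2], 1: [0, 2], 2: [0, 1]}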
| 295
| 1
|
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class TFBlenderbotModelTester:
lowerCAmelCase_ = BlenderbotConfig
lowerCAmelCase_ = {}
lowerCAmelCase_ = '''gelu'''
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=13 , lowerCAmelCase_=7 , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=99 , lowerCAmelCase_=32 , lowerCAmelCase_=2 , lowerCAmelCase_=4 , lowerCAmelCase_=37 , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=20 , lowerCAmelCase_=2 , lowerCAmelCase_=1 , lowerCAmelCase_=0 , ) -> Optional[Any]:
_snake_case = parent
_snake_case = batch_size
_snake_case = seq_length
_snake_case = is_training
_snake_case = use_labels
_snake_case = vocab_size
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = max_position_embeddings
_snake_case = eos_token_id
_snake_case = pad_token_id
_snake_case = bos_token_id
def lowerCAmelCase ( self ) -> Dict:
_snake_case = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_snake_case = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_snake_case = tf.concat([input_ids, eos_tensor] , axis=1 )
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_snake_case = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_snake_case = prepare_blenderbot_inputs_dict(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
return config, inputs_dict
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Union[str, Any]:
_snake_case = TFBlenderbotModel(config=lowerCAmelCase_ ).get_decoder()
_snake_case = inputs_dict['input_ids']
_snake_case = input_ids[:1, :]
_snake_case = inputs_dict['attention_mask'][:1, :]
_snake_case = inputs_dict['head_mask']
_snake_case = 1
# first forward pass
_snake_case = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , head_mask=lowerCAmelCase_ , use_cache=lowerCAmelCase_ )
_snake_case , _snake_case = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_snake_case = ids_tensor((self.batch_size, 3) , config.vocab_size )
        _snake_case = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
# append to next input_ids and
_snake_case = tf.concat([input_ids, next_tokens] , axis=-1 )
_snake_case = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_snake_case = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )[0]
_snake_case = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , past_key_values=lowerCAmelCase_ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_snake_case = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_snake_case = output_from_no_past[:, -3:, random_slice_idx]
_snake_case = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowerCAmelCase_ , lowerCAmelCase_ , rtol=1E-3 )
def prepare_blenderbot_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ) -> dict:
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
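# Quick illustration of the padding-mask construction above (toy values):
#   ids = tf.constant([[5, 6, 0]])                # 0 is the pad token id
#   tf.cast(tf.math.not_equal(ids, 0), tf.int8)   # -> [[1, 1, 0]]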
@require_tf
class TFBlenderbotModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
lowerCAmelCase_ = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
lowerCAmelCase_ = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
lowerCAmelCase_ = (
{
'''conversational''': TFBlenderbotForConditionalGeneration,
'''feature-extraction''': TFBlenderbotModel,
'''summarization''': TFBlenderbotForConditionalGeneration,
'''text2text-generation''': TFBlenderbotForConditionalGeneration,
'''translation''': TFBlenderbotForConditionalGeneration,
}
if is_tf_available()
else {}
)
lowerCAmelCase_ = True
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def lowerCAmelCase ( self ) -> Union[str, Any]:
_snake_case = TFBlenderbotModelTester(self )
_snake_case = ConfigTester(self , config_class=lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
def lowerCAmelCase ( self ) -> Any:
_snake_case = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowerCAmelCase_ )
@require_tokenizers
@require_tf
class UpperCamelCase_ ( unittest.TestCase ):
lowerCAmelCase_ = ['''My friends are cool but they eat too many carbs.''']
lowerCAmelCase_ = '''facebook/blenderbot-400M-distill'''
@cached_property
def lowerCAmelCase ( self ) -> Dict:
return BlenderbotTokenizer.from_pretrained(self.model_name )
@cached_property
def lowerCAmelCase ( self ) -> int:
_snake_case = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def lowerCAmelCase ( self ) -> Dict:
_snake_case = self.tokenizer(self.src_text , return_tensors='tf' )
_snake_case = self.model.generate(
model_inputs.input_ids , )
_snake_case = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=lowerCAmelCase_ )[0]
assert (
generated_words
== " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
)
| 295
|
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class TFEfficientFormerModelTester:
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ = 13 , lowerCAmelCase_ = 64 , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 3 , lowerCAmelCase_ = 3 , lowerCAmelCase_ = True , lowerCAmelCase_ = True , lowerCAmelCase_ = 128 , lowerCAmelCase_=[16, 32, 64, 128] , lowerCAmelCase_ = 7 , lowerCAmelCase_ = 4 , lowerCAmelCase_ = 37 , lowerCAmelCase_ = "gelu" , lowerCAmelCase_ = 0.1 , lowerCAmelCase_ = 0.1 , lowerCAmelCase_ = 10 , lowerCAmelCase_ = 0.02 , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 1 , lowerCAmelCase_ = 128 , lowerCAmelCase_ = [2, 2, 2, 2] , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 2 , ) -> Dict:
_snake_case = parent
_snake_case = batch_size
_snake_case = image_size
_snake_case = patch_size
_snake_case = num_channels
_snake_case = is_training
_snake_case = use_labels
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = type_sequence_label_size
_snake_case = initializer_range
_snake_case = encoder_stride
_snake_case = num_attention_outputs
_snake_case = embed_dim
_snake_case = embed_dim + 1
_snake_case = resolution
_snake_case = depths
_snake_case = hidden_sizes
_snake_case = dim
_snake_case = mlp_expansion_ratio
def lowerCAmelCase ( self ) -> Optional[Any]:
_snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case = None
if self.use_labels:
_snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_snake_case = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase ( self ) -> Tuple:
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]:
_snake_case = TFEfficientFormerModel(config=lowerCAmelCase_ )
_snake_case = model(lowerCAmelCase_ , training=lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Any:
_snake_case = self.type_sequence_label_size
_snake_case = TFEfficientFormerForImageClassification(lowerCAmelCase_ )
_snake_case = model(lowerCAmelCase_ , labels=lowerCAmelCase_ , training=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_snake_case = 1
_snake_case = TFEfficientFormerForImageClassification(lowerCAmelCase_ )
_snake_case = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_snake_case = model(lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCAmelCase ( self ) -> List[str]:
_snake_case = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case = config_and_inputs
_snake_case = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class TFEfficientFormerModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
lowerCAmelCase_ = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
lowerCAmelCase_ = (
{
'''feature-extraction''': TFEfficientFormerModel,
'''image-classification''': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def lowerCAmelCase ( self ) -> str:
_snake_case = TFEfficientFormerModelTester(self )
_snake_case = ConfigTester(
self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ , hidden_size=37 )
def lowerCAmelCase ( self ) -> str:
self.config_tester.run_common_tests()
@unittest.skip(reason='EfficientFormer does not use inputs_embeds' )
def lowerCAmelCase ( self ) -> int:
pass
@unittest.skip(reason='EfficientFormer does not support input and output embeddings' )
def lowerCAmelCase ( self ) -> Optional[Any]:
pass
def lowerCAmelCase ( self ) -> str:
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(lowerCAmelCase_ )
_snake_case = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case = [*signature.parameters.keys()]
_snake_case = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> Optional[Any]:
def check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case = model_class(lowerCAmelCase_ )
_snake_case = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) , training=lowerCAmelCase_ )
_snake_case = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_snake_case = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ )
if hasattr(self.model_tester , 'encoder_seq_length' ):
_snake_case = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , 'chunk_length' ) and self.model_tester.chunk_length > 1:
_snake_case = seq_length * self.model_tester.chunk_length
else:
_snake_case = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
_snake_case = outputs.decoder_hidden_states
                self.assertIsInstance(lowerCAmelCase_ , (list, tuple) )
self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ )
_snake_case = getattr(self.model_tester , 'seq_length' , lowerCAmelCase_ )
_snake_case = getattr(self.model_tester , 'decoder_seq_length' , lowerCAmelCase_ )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_snake_case = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False ) -> List[Any]:
_snake_case = super()._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def lowerCAmelCase ( self ) -> Dict:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
@unittest.skip(reason='EfficientFormer does not implement masked image modeling yet' )
def lowerCAmelCase ( self ) -> Dict:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> List[Any]:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase_ )
@slow
def lowerCAmelCase ( self ) -> str:
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case = TFEfficientFormerModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> List[str]:
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case = True
_snake_case = getattr(self.model_tester , 'seq_length' , lowerCAmelCase_ )
_snake_case = getattr(self.model_tester , 'encoder_seq_length' , lowerCAmelCase_ )
_snake_case = getattr(self.model_tester , 'key_length' , lowerCAmelCase_ )
_snake_case = getattr(self.model_tester , 'chunk_length' , lowerCAmelCase_ )
if chunk_length is not None and hasattr(self.model_tester , 'num_hashes' ):
_snake_case = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
_snake_case = True
_snake_case = False
_snake_case = True
_snake_case = model_class(lowerCAmelCase_ )
_snake_case = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) , training=lowerCAmelCase_ )
_snake_case = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowerCAmelCase_ ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_snake_case = True
_snake_case = model_class(lowerCAmelCase_ )
_snake_case = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) , training=lowerCAmelCase_ )
_snake_case = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowerCAmelCase_ ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
    def test_compile_tf_model(self):
        # We use a simplified version of this test for EfficientFormer because it requires training=False
        # and Keras refuses to let us force that during functional construction
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            # Prepare our model
            model = model_class(config)
            # These are maximally general inputs for the model, with multiple None dimensions
            # Hopefully this will catch any conditionals that fail for flexible shapes
            functional_inputs = {
                key: tf.keras.Input(shape=val.shape[1:], dtype=val.dtype, name=key)
                for key, val in model.input_signature.items()
                if key in model.dummy_inputs
            }
            outputs_dict = model(functional_inputs)
            self.assertTrue(outputs_dict is not None)
def prepare_img():
    """Load the COCO fixture image used by the integration tests below."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class EfficientFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head(self):
        model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.0555, 0.4825, -0.0852])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_image_classification_head_with_teacher(self):
        model = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            "snap-research/efficientformer-l1-300"
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.1312, 0.4353, -1.0499])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import Swin2SRConfig, Swin2SRForImageSuperResolution, Swin2SRImageProcessor
def get_config(checkpoint_url):
    """Infer the Swin2SR configuration from the checkpoint URL."""
    config = Swin2SRConfig()

    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = "pixelshuffle_aux"
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = "pixelshuffledirect"
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = "nearest+conv"
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 126
        config.window_size = 7
        config.img_range = 255.0
        config.upsampler = ""

    return config
def rename_key(name, config):
    """Map an original Swin2SR parameter name to its HF equivalent."""
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.patch_embeddings.layernorm")
    if "layers" in name:
        name = name.replace("layers", "encoder.stages")
    if "residual_group.blocks" in name:
        name = name.replace("residual_group.blocks", "layers")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "patch_embed.projection")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "conv_first" in name:
        name = name.replace("conv_first", "first_convolution")
    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace("conv_last", "final_convolution")
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace("conv_before_upsample.0", "conv_before_upsample")
            if "upsample.0" in name:
                name = name.replace("upsample.0", "upsample.convolution_0")
            if "upsample.2" in name:
                name = name.replace("upsample.2", "upsample.convolution_1")
            name = "upsample." + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace("upsample.0.weight", "upsample.conv.weight")
            name = name.replace("upsample.0.bias", "upsample.conv.bias")
        else:
            pass
    else:
        name = "swin2sr." + name

    return name
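# Illustrative example of the mapping above (hypothetical key, worked through the rules by hand):
#   "layers.0.residual_group.blocks.1.attn.proj.weight"
#     -> "swin2sr.encoder.stages.0.layers.1.attention.output.dense.weight"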
def convert_state_dict(orig_state_dict, config):
    """Rename every checkpoint key, splitting fused qkv matrices along the way."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            stage_num = int(key_split[1])
            block_num = int(key_split[4])
            dim = config.embed_dim

            if "weight" in key:
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, config)] = val

    return orig_state_dict
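# Note: the original checkpoint fuses query/key/value into a single "qkv" matrix of shape
# (3 * embed_dim, embed_dim); the slices above split it back into the query ([:dim]),
# key ([dim : 2 * dim]) and value ([-dim:]) projections expected by the HF model.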
def convert_swin2sr_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    """Convert an original Swin2SR checkpoint into the HF format and verify its outputs."""
    config = get_config(checkpoint_url)
    model = Swin2SRForImageSuperResolution(config)
    model.eval()

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)

    if len(missing_keys) > 0:
        raise ValueError("Missing keys when converting: {}".format(missing_keys))
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(f"Unexpected key {key} in state_dict")

    # verify values
    url = "https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = Swin2SRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values

    image_size = 126 if "Jpeg" in checkpoint_url else 256
    transforms = Compose(
        [
            Resize((image_size, image_size)),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    pixel_values = transforms(image).unsqueeze(0)

    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1)

    outputs = model(pixel_values)
# assert values
    if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]]
        )
    elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]]
        )
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        # TODO values didn't match exactly here
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]]
        )
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]]
        )
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]]
        )
    assert (
        outputs.reconstruction.shape == expected_shape
    ), f"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], expected_slice, atol=1e-3)
    print("Looks ok!")

    url_to_name = {
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': (
'swin2SR-classical-sr-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': (
'swin2SR-classical-sr-x4-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': (
'swin2SR-compressed-sr-x4-48'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': (
'swin2SR-lightweight-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': (
'swin2SR-realworld-sr-x4-64-bsrgan-psnr'
),
}
    model_name = url_to_name[checkpoint_url]

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub(f"caidas/{model_name}")
        processor.push_to_hub(f"caidas/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth",
        type=str,
        help="URL of the original Swin2SR checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the converted model to the hub.")
    args = parser.parse_args()
    convert_swin2sr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
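    # Illustrative invocation (the script filename is an assumption, not taken from this file):
    #   python convert_swin2sr_original_to_pytorch.py \
    #       --checkpoint_url https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth \
    #       --pytorch_dump_folder_path ./swin2SR-classical-sr-x2-64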
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class TestTokenizationLED(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"
    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)
    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])
    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 5122))
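    # "I am a small frog" tokenizes to 5 BPE tokens, so 1024 string repetitions plus the
    # <s> and </s> special tokens pad the batch out to 5 * 1024 + 2 = 5122 positions.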
    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors="pt")
            targets = tokenizer(text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())
    @require_torch
    def test_global_attention_mask(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["Summary of the text.", "Another summary."]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]

            encoded_output = tokenizer(src_text, padding=False)
            encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask)
    def test_pretokenized_inputs(self):
        pass
    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    """Transform a snake_case string to camelCase, or to PascalCase if use_pascal is True."""
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)

    words = input_str.split("_")
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]

    return "".join([initial_word, *capitalized_words])
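# Illustrative behaviour (function name as restored above; worked out from the logic):
#   snake_to_camel_case("some_random_string")                  -> "someRandomString"
#   snake_to_camel_case("some_random_string", use_pascal=True) -> "SomeRandomString"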
if __name__ == "__main__":
from doctest import testmod
testmod()
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class BertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertTokenizer
    rust_tokenizer_class = BertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_basic_tokenizer_splits_on_punctuation(self):
        tokenizer = BasicTokenizer()
        text = "a\n'll !!to?'d of, can't."
        expected = ["a", "'", "ll", "!", "!", "to", "?", "'", "d", "of", ",", "can", "'", "t", "."]
        self.assertListEqual(tokenizer.tokenize(text), expected)
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
        )
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("bert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]
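    # Note: 101 and 102 are bert-base-uncased's ids for the [CLS] and [SEP] special tokens,
    # so a single sequence is wrapped as [CLS] A [SEP] and a pair as [CLS] A [SEP] B [SEP].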
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DiffusionPipeline,
    EulerDiscreteScheduler,
    StableDiffusionXLImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionXLImg2ImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionXLImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, cross_attention_dim=64,
        )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear", timestep_spacing="leading",
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=32,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)
        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_2,
            "tokenizer_2": tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.75,
        }
        return inputs
    def test_stable_diffusion_xl_img2img_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)

        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_save_load_optional_components(self):
        pass
    def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        prompt = 3 * [inputs.pop("prompt")]

        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)

        output = sd_pipe(
            **inputs,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
        )
        image_slice_2 = output.images[0, -3:, -3:, -1]

        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
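    # Passing pre-computed prompt embeddings must reproduce the plain text-prompt path
    # almost exactly; the 1e-4 max-difference bound above is the float tolerance for that.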
@slow
@require_torch_gpu
class StableDiffusionXLImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_pipeline(self):
        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
logger = logging.get_logger(__name__)


class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
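# Migration is a one-line change since FlavaImageProcessor exposes the same preprocessing
# API (the checkpoint name below is illustrative, not taken from this file):
#   from transformers import FlavaImageProcessor
#   processor = FlavaImageProcessor.from_pretrained("facebook/flava-full")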
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
    _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilevit"] = [
        "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileViTForImageClassification",
        "MobileViTForSemanticSegmentation",
        "MobileViTModel",
        "MobileViTPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
        "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileViTForImageClassification",
        "TFMobileViTForSemanticSegmentation",
        "TFMobileViTModel",
        "TFMobileViTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
    {"dataset": "wikipedia", "config_name": "20220301.de"},
    {"dataset": "wikipedia", "config_name": "20220301.en"},
    {"dataset": "wikipedia", "config_name": "20220301.fr"},
    {"dataset": "wikipedia", "config_name": "20220301.frr"},
    {"dataset": "wikipedia", "config_name": "20220301.it"},
    {"dataset": "wikipedia", "config_name": "20220301.simple"},
    {"dataset": "snli", "config_name": "plain_text"},
    {"dataset": "eli5", "config_name": "LFQA_reddit"},
    {"dataset": "wiki40b", "config_name": "en"},
    {"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
    {"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
    {"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
    {"dataset": "natural_questions", "config_name": "default"},
]
def list_datasets_on_hf_gcp_parameters(with_config=True):
    """Build the named-parameter dicts for every dataset (and optionally config) hosted on HF GCP."""
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class TestDatasetOnHfGcp(TestCase):
    dataset = None
    config_name = None

    def test_dataset_info_available(self, dataset, config_name):
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)
            builder_cls = import_main_class(dataset_module.module_path, dataset=True)
            builder_instance = builder_cls(
                cache_dir=tmp_dir,
                config_name=config_name,
                hash=dataset_module.hash,
            )
            dataset_info_url = "/".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False).replace(os.sep, "/"),
                    config.DATASET_INFO_FILENAME,
                ]
            )
            dataset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(dataset_info_path))
@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory):
    tmp_dir = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple"
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_dir)
    builder_cls = import_main_class(dataset_module.module_path)
    builder_instance = builder_cls(
        cache_dir=tmp_dir,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def test_as_streaming_dataset(tmp_path):
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_path)
    builder_cls = import_main_class(dataset_module.module_path, dataset=True)
    builder_instance = builder_cls(
        cache_dir=tmp_path,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds, IterableDatasetDict)
    assert "train" in ds
    assert isinstance(ds["train"], IterableDataset)
    assert next(iter(ds["train"]))
from math import pi, sqrt


def gamma(num: float) -> float:
    """Compute the gamma function for positive integers and half-integers,
    using the recursion gamma(x) = (x - 1) * gamma(x - 1)."""
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)
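# Sanity checks implied by the recursion (gamma(n) == (n - 1)! for positive integers,
# gamma(0.5) == sqrt(pi)):
#   gamma(4)   -> 6.0
#   gamma(1.5) -> 0.5 * sqrt(pi)  (~0.8862)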
def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        print(f"gamma({num}) = {gamma(num)}")
        print("\nEnter 0 to exit...")
def one_pence() -> int:
    return 1


def two_pence(x: int) -> int:
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x: int) -> int:
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(pence: int = 200) -> int:
    return two_pound(pence)
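# solution() counts the ways to make 200 pence from {1p, 2p, 5p, 10p, 20p, 50p, 100p, 200p}
# via mutual recursion: each coin function tries "use this coin once" plus "fall through to
# the next smaller coin". This is Project Euler problem 31; its published answer is 73682.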
if __name__ == "__main__":
print(solution(int(input().strip())))
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    """List (old, new) key pairs that map timm parameter names onto HF ViT hybrid names."""
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(('cls_token', 'vit.embeddings.cls_token') )
rename_keys.append(('pos_embed', 'vit.embeddings.position_embeddings') )
rename_keys.append(('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias') )
# backbone
rename_keys.append(('patch_embed.backbone.stem.conv.weight', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight') )
rename_keys.append(('patch_embed.backbone.stem.norm.weight', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight') )
rename_keys.append(('patch_embed.backbone.stem.norm.bias', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias') )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
# fmt: on
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
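# The timm checkpoint stores attention as one fused qkv projection of shape
# (3 * hidden_size, hidden_size); the slices above peel off the query, key and value
# thirds in that order and write them to the HF parameter names.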
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def lowerCamelCase__ ( UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any]=False ) -> Union[str, Any]:
'''simple docstring'''
_snake_case = BitConfig(
global_padding='same' , layer_type='bottleneck' , depths=(3, 4, 9) , out_features=['stage3'] , embedding_dynamic_padding=UpperCamelCase__ , )
_snake_case = ViTHybridConfig(backbone_config=UpperCamelCase__ , image_size=384 , num_labels=1_000 )
_snake_case = False
# load original model from timm
_snake_case = timm.create_model(UpperCamelCase__ , pretrained=UpperCamelCase__ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
_snake_case = timm_model.state_dict()
if base_model:
remove_classification_head_(UpperCamelCase__ )
_snake_case = create_rename_keys(UpperCamelCase__ , UpperCamelCase__ )
for src, dest in rename_keys:
rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
read_in_q_k_v(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
_snake_case = 'huggingface/label-files'
_snake_case = 'imagenet-1k-id2label.json'
_snake_case = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type='dataset' ) , 'r' ) )
_snake_case = {int(UpperCamelCase__ ): v for k, v in idalabel.items()}
_snake_case = idalabel
_snake_case = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
_snake_case = ViTHybridModel(UpperCamelCase__ ).eval()
else:
_snake_case = ViTHybridForImageClassification(UpperCamelCase__ ).eval()
model.load_state_dict(UpperCamelCase__ )
# create image processor
_snake_case = create_transform(**resolve_data_config({} , model=UpperCamelCase__ ) )
_snake_case = transform.transforms
_snake_case = {
'bilinear': PILImageResampling.BILINEAR,
'bicubic': PILImageResampling.BICUBIC,
'nearest': PILImageResampling.NEAREST,
}
_snake_case = ViTHybridImageProcessor(
do_resize=UpperCamelCase__ , size={'shortest_edge': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=UpperCamelCase__ , crop_size={'height': timm_transforms[1].size[0], 'width': timm_transforms[1].size[1]} , do_normalize=UpperCamelCase__ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
_snake_case = prepare_img()
_snake_case = transform(UpperCamelCase__ ).unsqueeze(0 )
_snake_case = processor(UpperCamelCase__ , return_tensors='pt' ).pixel_values
# verify pixel values
assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ )
# verify logits
with torch.no_grad():
_snake_case = model(UpperCamelCase__ )
_snake_case = outputs.logits
print('Predicted class:' , logits.argmax(-1 ).item() )
if base_model:
_snake_case = timm_model.forward_features(UpperCamelCase__ )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(UpperCamelCase__ , outputs.pooler_output , atol=1e-3 )
else:
_snake_case = timm_model(UpperCamelCase__ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(UpperCamelCase__ , outputs.logits , atol=1e-3 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
print(F'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(UpperCamelCase__ )
print(F'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(UpperCamelCase__ )
if push_to_hub:
print(F'''Pushing model and processor to the hub {vit_name}''' )
model.push_to_hub(F'''ybelkada/{vit_name}''' )
processor.push_to_hub(F'''ybelkada/{vit_name}''' )
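# The function above trusts the converted model only after checking that preprocessing
# matches; a distilled sketch of that verification pattern (`timm_transform` and
# `hf_processor` below stand in for the objects built inside the function):
def preprocessing_matches(image, timm_transform, hf_processor, atol=1e-6):
    import torch
    timm_pixels = timm_transform(image).unsqueeze(0)
    hf_pixels = hf_processor(image, return_tensors='pt').pixel_values
    return torch.allclose(timm_pixels, hf_pixels, atol=atol)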
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_r50_s16_384""",
type=str,
help="""Name of the hybrid ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
UpperCAmelCase_ = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
def lowerCamelCase__ ( UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[int] ) -> Tuple:
    '''Compute the binomial coefficient C(n, r) iteratively, using Pascal's rule with O(r) memory.'''
    _snake_case = [0 for i in range(r + 1 )]
    # nC0 = 1
    _snake_case = 1
    for i in range(1 , n + 1 ):
        # compute the current row from the previous one, right to left so entries are not clobbered
        _snake_case = min(i , r )
while j > 0:
c[j] += c[j - 1]
j -= 1
return c[r]
print(binomial_coefficient(n=10, r=5))
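# A quick cross-check of the Pascal's-rule implementation above against the closed
# form in the standard library (math.comb is available from Python 3.8 onwards):
import math
assert math.comb(10, 5) == 252  # the same C(10, 5) value printed above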
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase_ = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ["""NllbTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ["""NllbTokenizerFast"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
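# A minimal sketch of the deferred-import idea behind _LazyModule, using the
# module-level __getattr__ hook from PEP 562: nothing is imported until an exported
# name is first accessed. This illustrates the pattern only; it is not the actual
# _LazyModule implementation.
import importlib

_lazy_targets = {'NllbTokenizer': '.tokenization_nllb'}

def __getattr__(name):
    if name in _lazy_targets:
        _module = importlib.import_module(_lazy_targets[name], __name__)
        return getattr(_module, name)
    raise AttributeError(F'''module {__name__!r} has no attribute {name!r}''')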
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class UpperCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
@register_to_config
def __init__( self , lowerCAmelCase_ = 128 , lowerCAmelCase_ = 256 , lowerCAmelCase_ = 20_00.0 , lowerCAmelCase_ = 768 , lowerCAmelCase_ = 12 , lowerCAmelCase_ = 12 , lowerCAmelCase_ = 64 , lowerCAmelCase_ = 2048 , lowerCAmelCase_ = 0.1 , ) -> Union[str, Any]:
super().__init__()
_snake_case = nn.Sequential(
nn.Linear(lowerCAmelCase_ , d_model * 4 , bias=lowerCAmelCase_ ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=lowerCAmelCase_ ) , nn.SiLU() , )
_snake_case = nn.Embedding(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = False
_snake_case = nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ )
_snake_case = nn.Dropout(p=lowerCAmelCase_ )
_snake_case = nn.ModuleList()
for lyr_num in range(lowerCAmelCase_ ):
# FiLM conditional T5 decoder
_snake_case = DecoderLayer(d_model=lowerCAmelCase_ , d_kv=lowerCAmelCase_ , num_heads=lowerCAmelCase_ , d_ff=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ )
self.decoders.append(lowerCAmelCase_ )
_snake_case = TaLayerNorm(lowerCAmelCase_ )
_snake_case = nn.Dropout(p=lowerCAmelCase_ )
_snake_case = nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Tuple:
_snake_case = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[Any]:
_snake_case , _snake_case , _snake_case = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
_snake_case = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
_snake_case = self.conditioning_emb(lowerCAmelCase_ ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
_snake_case = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
_snake_case = torch.broadcast_to(
torch.arange(lowerCAmelCase_ , device=decoder_input_tokens.device ) , (batch, seq_length) , )
_snake_case = self.position_encoding(lowerCAmelCase_ )
_snake_case = self.continuous_inputs_projection(lowerCAmelCase_ )
inputs += position_encodings
_snake_case = self.dropout(lowerCAmelCase_ )
# decoder: No padding present.
_snake_case = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
_snake_case = [(x, self.encoder_decoder_mask(lowerCAmelCase_ , lowerCAmelCase_ )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
_snake_case = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
_snake_case = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
_snake_case = lyr(
lowerCAmelCase_ , conditioning_emb=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , encoder_attention_mask=lowerCAmelCase_ , )[0]
_snake_case = self.decoder_norm(lowerCAmelCase_ )
_snake_case = self.post_dropout(lowerCAmelCase_ )
_snake_case = self.spec_out(lowerCAmelCase_ )
return spec_out
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=1E-6 ) -> Tuple:
super().__init__()
_snake_case = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=lowerCAmelCase_ , d_kv=lowerCAmelCase_ , num_heads=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=lowerCAmelCase_ , d_kv=lowerCAmelCase_ , num_heads=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ , layer_norm_epsilon=lowerCAmelCase_ , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=lowerCAmelCase_ , d_ff=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ , layer_norm_epsilon=lowerCAmelCase_ ) )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , ) -> Tuple:
_snake_case = self.layer[0](
lowerCAmelCase_ , conditioning_emb=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , )
if encoder_hidden_states is not None:
_snake_case = torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to(
encoder_hidden_states.dtype )
_snake_case = self.layer[1](
lowerCAmelCase_ , key_value_states=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , )
# Apply Film Conditional Feed Forward layer
_snake_case = self.layer[-1](lowerCAmelCase_ , lowerCAmelCase_ )
return (hidden_states,)
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Tuple:
super().__init__()
_snake_case = TaLayerNorm(lowerCAmelCase_ )
_snake_case = TaFiLMLayer(in_features=d_model * 4 , out_features=lowerCAmelCase_ )
_snake_case = Attention(query_dim=lowerCAmelCase_ , heads=lowerCAmelCase_ , dim_head=lowerCAmelCase_ , out_bias=lowerCAmelCase_ , scale_qk=lowerCAmelCase_ )
_snake_case = nn.Dropout(lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , ) -> str:
# pre_self_attention_layer_norm
_snake_case = self.layer_norm(lowerCAmelCase_ )
if conditioning_emb is not None:
_snake_case = self.FiLMLayer(lowerCAmelCase_ , lowerCAmelCase_ )
# Self-attention block
_snake_case = self.attention(lowerCAmelCase_ )
_snake_case = hidden_states + self.dropout(lowerCAmelCase_ )
return hidden_states
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> str:
super().__init__()
_snake_case = Attention(query_dim=lowerCAmelCase_ , heads=lowerCAmelCase_ , dim_head=lowerCAmelCase_ , out_bias=lowerCAmelCase_ , scale_qk=lowerCAmelCase_ )
_snake_case = TaLayerNorm(lowerCAmelCase_ , eps=lowerCAmelCase_ )
_snake_case = nn.Dropout(lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , ) -> Dict:
_snake_case = self.layer_norm(lowerCAmelCase_ )
_snake_case = self.attention(
lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , attention_mask=attention_mask.squeeze(1 ) , )
_snake_case = hidden_states + self.dropout(lowerCAmelCase_ )
return layer_output
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[str]:
super().__init__()
_snake_case = TaDenseGatedActDense(d_model=lowerCAmelCase_ , d_ff=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ )
_snake_case = TaFiLMLayer(in_features=d_model * 4 , out_features=lowerCAmelCase_ )
_snake_case = TaLayerNorm(lowerCAmelCase_ , eps=lowerCAmelCase_ )
_snake_case = nn.Dropout(lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=None ) -> Union[str, Any]:
_snake_case = self.layer_norm(lowerCAmelCase_ )
if conditioning_emb is not None:
_snake_case = self.film(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = self.DenseReluDense(lowerCAmelCase_ )
_snake_case = hidden_states + self.dropout(lowerCAmelCase_ )
return hidden_states
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Union[str, Any]:
super().__init__()
_snake_case = nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ )
_snake_case = nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ )
_snake_case = nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ )
_snake_case = nn.Dropout(lowerCAmelCase_ )
_snake_case = NewGELUActivation()
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Any:
        # gated activation: a GELU-activated projection multiplied elementwise by a second, linear projection
        _snake_case = self.act(self.wi_0(lowerCAmelCase_ ) )
        _snake_case = self.wi_1(lowerCAmelCase_ )
_snake_case = hidden_gelu * hidden_linear
_snake_case = self.dropout(lowerCAmelCase_ )
_snake_case = self.wo(lowerCAmelCase_ )
return hidden_states
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=1E-6 ) -> str:
super().__init__()
_snake_case = nn.Parameter(torch.ones(lowerCAmelCase_ ) )
_snake_case = eps
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> int:
        # T5 uses a layer norm that only scales and doesn't shift, also known as Root Mean
        # Square Layer Normalization (https://arxiv.org/abs/1910.07467): the variance is computed
        # without subtracting the mean and there is no bias. Additionally, the accumulation for
        # half-precision inputs is done in fp32.
        _snake_case = hidden_states.to(torch.float32 ).pow(2 ).mean(-1 , keepdim=True )
        _snake_case = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
        # convert back into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            _snake_case = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
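# A compact numerical restatement of the RMS normalization implemented above: divide
# by the root mean square over the last dimension (no mean subtraction, no bias),
# then rescale by the learned weight. The sizes below are toy values.
import torch

def rms_norm(x, weight, eps=1e-6):
    variance = x.to(torch.float32).pow(2).mean(-1, keepdim=True)
    x = x * torch.rsqrt(variance + eps)
    return weight * x.to(weight.dtype)

_out = rms_norm(torch.randn(2, 8, dtype=torch.float16), torch.ones(8, dtype=torch.float16))
assert _out.dtype == torch.float16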
class UpperCamelCase_ ( nn.Module ):
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> torch.Tensor:
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.04_47_15 * torch.pow(lowerCAmelCase_ , 3.0 )) ))
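# The class above is the tanh approximation of GELU (the "gelu_new" variant); a
# quick sketch confirming it stays within a few thousandths of the exact erf-based GELU:
import math
import torch

def gelu_tanh(x):
    return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * torch.pow(x, 3.0))))

_x = torch.linspace(-3.0, 3.0, 7)
assert torch.allclose(gelu_tanh(_x), torch.nn.functional.gelu(_x), atol=1e-3)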
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Any:
super().__init__()
_snake_case = nn.Linear(lowerCAmelCase_ , out_features * 2 , bias=lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[Any]:
_snake_case = self.scale_bias(lowerCAmelCase_ )
_snake_case , _snake_case = torch.chunk(lowerCAmelCase_ , 2 , -1 )
_snake_case = x * (1 + scale) + shift
return x
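# A toy demonstration of the FiLM step performed by the layer above: a conditioning
# embedding is projected to twice the feature width, chunked into a scale and a
# shift, and applied as x * (1 + scale) + shift. Values below are random stand-ins.
import torch

_features = torch.randn(1, 6)
_scale, _shift = torch.chunk(torch.randn(1, 12), 2, dim=-1)  # stand-in for self.scale_bias(conditioning_emb)
_modulated = _features * (1 + _scale) + _shift
assert _modulated.shape == _features.shape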
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"""EleutherAI/gpt-neo-1.3B""": """https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json""",
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class UpperCamelCase_ ( _lowerCamelCase ):
lowerCAmelCase_ = '''gpt_neo'''
lowerCAmelCase_ = ['''past_key_values''']
lowerCAmelCase_ = {'''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
def __init__( self , lowerCAmelCase_=5_0257 , lowerCAmelCase_=2048 , lowerCAmelCase_=2048 , lowerCAmelCase_=24 , lowerCAmelCase_=[[["global", "local"], 12]] , lowerCAmelCase_=16 , lowerCAmelCase_=None , lowerCAmelCase_=256 , lowerCAmelCase_="gelu_new" , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.1 , lowerCAmelCase_=1E-5 , lowerCAmelCase_=0.02 , lowerCAmelCase_=True , lowerCAmelCase_=5_0256 , lowerCAmelCase_=5_0256 , **lowerCAmelCase_ , ) -> Tuple:
_snake_case = vocab_size
_snake_case = max_position_embeddings
_snake_case = hidden_size
_snake_case = num_layers
_snake_case = num_heads
_snake_case = intermediate_size
_snake_case = window_size
_snake_case = activation_function
_snake_case = resid_dropout
_snake_case = embed_dropout
_snake_case = attention_dropout
_snake_case = classifier_dropout
_snake_case = layer_norm_epsilon
_snake_case = initializer_range
_snake_case = use_cache
_snake_case = bos_token_id
_snake_case = eos_token_id
_snake_case = attention_types
_snake_case = self.expand_attention_types_params(lowerCAmelCase_ )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.attention_layers)` == `config.num_layers` '
F'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, '''
F'''`config.num_layers = {self.num_layers}`. '''
'`config.attention_layers` is prepared using `config.attention_types`. '
'Please verify the value of `config.attention_types` argument.' )
super().__init__(bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , **lowerCAmelCase_ )
@staticmethod
def lowerCAmelCase ( lowerCAmelCase_ ) -> Any:
_snake_case = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
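# A worked example of the expansion above: GPT-Neo describes its attention pattern
# compactly as [[["global", "local"], 12]], i.e. "repeat the (global, local) pair
# 12 times", which expands to 24 alternating per-layer attention types.
_expanded = []
for _item in [[["global", "local"], 12]]:
    for _ in range(_item[1]):
        _expanded.extend(_item[0])
assert len(_expanded) == 24 and _expanded[:4] == ['global', 'local', 'global', 'local']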
def lowerCamelCase__ ( UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[int] ) -> Any:
    '''Custom torch.Tensor.unfold implementation to enable the export to ONNX.'''
import torch
_snake_case = input.size()
_snake_case = len(UpperCamelCase__ )
_snake_case = shape[dimension]
_snake_case = torch.arange(0 , UpperCamelCase__ , UpperCamelCase__ )
_snake_case = torch.div(sizedim - size , UpperCamelCase__ , rounding_mode='floor' ) + 1
_snake_case = torch.arange(UpperCamelCase__ ) + low_indices[:min_length][:, None]
    _snake_case = [slice(None )] * rank
_snake_case = indices
_snake_case = input[s]
_snake_case = list(range(0 , rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(UpperCamelCase__ )
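# A hedged illustration of what the helper above computes: PyTorch's built-in
# Tensor.unfold yields the same overlapping windows along one dimension (the
# hand-rolled version presumably exists so the operation can be traced for ONNX).
import torch

_windows = torch.arange(10).unfold(dimension=0, size=4, step=2)
assert _windows.shape == (4, 4)          # windows starting at offsets 0, 2, 4, 6
assert _windows[1].tolist() == [2, 3, 4, 5]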
def lowerCamelCase__ ( UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict ) -> str:
    '''Find the largest divisor of a dimension that lies below the given bound, and the resulting number of blocks.'''
import torch
_snake_case = torch.arange(1 , UpperCamelCase__ )
_snake_case = torch.remainder(UpperCamelCase__ , UpperCamelCase__ )
_snake_case = remainders == 0
_snake_case = candidates[divisor_indices]
_snake_case = torch.max(UpperCamelCase__ )
return largest_divisor, torch.div(UpperCamelCase__ , UpperCamelCase__ , rounding_mode='floor' )
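# A numeric walk-through of the helper above with a dimension of 8 and a bound of
# 12: the divisor candidates are 1..11, those dividing 8 evenly are kept, and the
# largest (8) is returned together with the corresponding block count (1).
import torch

_candidates = torch.arange(1, 12)
_divisors = _candidates[torch.remainder(torch.tensor(8), _candidates) == 0]
assert _divisors.tolist() == [1, 2, 4, 8] and int(torch.max(_divisors)) == 8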
class UpperCamelCase_ ( _lowerCamelCase ):
@property
def lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
_snake_case = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase_ , direction='inputs' )
_snake_case = {0: 'batch', 1: 'past_sequence + sequence'}
else:
_snake_case = {0: 'batch', 1: 'sequence'}
return common_inputs
@property
def lowerCAmelCase ( self ) -> int:
return self._config.num_heads
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = -1 , lowerCAmelCase_ = -1 , lowerCAmelCase_ = False , lowerCAmelCase_ = None , ) -> Mapping[str, Any]:
        _snake_case = super(OnnxConfigWithPast , self ).generate_dummy_inputs(
lowerCAmelCase_ , batch_size=lowerCAmelCase_ , seq_length=lowerCAmelCase_ , is_pair=lowerCAmelCase_ , framework=lowerCAmelCase_ )
        # We need to order the inputs in the way they appear in the forward()
_snake_case = OrderedDict({'input_ids': common_inputs['input_ids']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
_snake_case , _snake_case = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
_snake_case = seqlen + 2
_snake_case = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
_snake_case = [
(torch.zeros(lowerCAmelCase_ ), torch.zeros(lowerCAmelCase_ )) for _ in range(self.num_layers )
]
_snake_case = common_inputs['attention_mask']
if self.use_past:
_snake_case = ordered_inputs['attention_mask'].dtype
_snake_case = torch.cat(
[ordered_inputs['attention_mask'], torch.ones(lowerCAmelCase_ , lowerCAmelCase_ , dtype=lowerCAmelCase_ )] , dim=1 )
return ordered_inputs
@property
def lowerCAmelCase ( self ) -> int:
return 13
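# A small restatement of the dummy past_key_values layout constructed above: one
# (key, value) pair per layer, each of shape (batch, num_heads, past_len, head_dim).
# The sizes below are toy values for illustration only.
import torch

_batch, _heads, _past_len, _head_dim = 2, 4, 5, 8
_past = [(torch.zeros(_batch, _heads, _past_len, _head_dim), torch.zeros(_batch, _heads, _past_len, _head_dim)) for _ in range(3)]
assert len(_past) == 3 and _past[0][0].shape == (_batch, _heads, _past_len, _head_dim)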
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class UpperCamelCase_ ( _lowerCamelCase ):
lowerCAmelCase_ = (DPMSolverSDEScheduler,)
lowerCAmelCase_ = 10
def lowerCAmelCase ( self , **lowerCAmelCase_ ) -> Union[str, Any]:
_snake_case = {
'num_train_timesteps': 1100,
'beta_start': 0.00_01,
'beta_end': 0.02,
'beta_schedule': 'linear',
'noise_sampler_seed': 0,
}
config.update(**lowerCAmelCase_ )
return config
def lowerCAmelCase ( self ) -> Optional[Any]:
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> Optional[int]:
for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01] , [0.00_02, 0.0_02, 0.02] ):
self.check_over_configs(beta_start=lowerCAmelCase_ , beta_end=lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> int:
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> List[str]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> List[str]:
_snake_case = self.scheduler_classes[0]
_snake_case = self.get_scheduler_config()
_snake_case = scheduler_class(**lowerCAmelCase_ )
scheduler.set_timesteps(self.num_inference_steps )
_snake_case = self.dummy_model()
_snake_case = self.dummy_sample_deter * scheduler.init_noise_sigma
_snake_case = sample.to(lowerCAmelCase_ )
for i, t in enumerate(scheduler.timesteps ):
_snake_case = scheduler.scale_model_input(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = model(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = output.prev_sample
_snake_case = torch.sum(torch.abs(lowerCAmelCase_ ) )
_snake_case = torch.mean(torch.abs(lowerCAmelCase_ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_67.47_82_10_44_92_18_75 ) < 1E-2
assert abs(result_mean.item() - 0.21_78_70_59_64_56_52_77 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_71.59_35_21_11_81_64_06 ) < 1E-2
assert abs(result_mean.item() - 0.2_23_42_90_68_92_29_96_52 ) < 1E-3
else:
assert abs(result_sum.item() - 1_62.52_38_34_22_85_15_62 ) < 1E-2
assert abs(result_mean.item() - 0.2_11_61_95_70_85_13_26 ) < 1E-3
def lowerCAmelCase ( self ) -> Optional[Any]:
_snake_case = self.scheduler_classes[0]
_snake_case = self.get_scheduler_config(prediction_type='v_prediction' )
_snake_case = scheduler_class(**lowerCAmelCase_ )
scheduler.set_timesteps(self.num_inference_steps )
_snake_case = self.dummy_model()
_snake_case = self.dummy_sample_deter * scheduler.init_noise_sigma
_snake_case = sample.to(lowerCAmelCase_ )
for i, t in enumerate(scheduler.timesteps ):
_snake_case = scheduler.scale_model_input(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = model(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = output.prev_sample
_snake_case = torch.sum(torch.abs(lowerCAmelCase_ ) )
_snake_case = torch.mean(torch.abs(lowerCAmelCase_ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_24.77_14_92_00_43_94_53 ) < 1E-2
assert abs(result_mean.item() - 0.1_62_26_28_90_14_81_62_84 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_28.1_66_33_60_59_57_03 ) < 1E-2
assert abs(result_mean.item() - 0.1_66_88_32_60_01_16_72_97 ) < 1E-3
else:
assert abs(result_sum.item() - 1_19.8_48_75_48_82_81_25 ) < 1E-2
assert abs(result_mean.item() - 0.15_60_53_06_62_53_66_21 ) < 1E-3
def lowerCAmelCase ( self ) -> Optional[int]:
_snake_case = self.scheduler_classes[0]
_snake_case = self.get_scheduler_config()
_snake_case = scheduler_class(**lowerCAmelCase_ )
scheduler.set_timesteps(self.num_inference_steps , device=lowerCAmelCase_ )
_snake_case = self.dummy_model()
_snake_case = self.dummy_sample_deter.to(lowerCAmelCase_ ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
_snake_case = scheduler.scale_model_input(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = model(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = output.prev_sample
_snake_case = torch.sum(torch.abs(lowerCAmelCase_ ) )
_snake_case = torch.mean(torch.abs(lowerCAmelCase_ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_67.46_95_73_97_46_09_38 ) < 1E-2
assert abs(result_mean.item() - 0.2_18_05_93_46_07_98_26_35 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_71.59_35_36_37_69_53_12 ) < 1E-2
assert abs(result_mean.item() - 0.2_23_42_90_83_82_41_57_71 ) < 1E-3
else:
assert abs(result_sum.item() - 1_62.52_38_34_22_85_15_62 ) < 1E-2
assert abs(result_mean.item() - 0.2_11_61_95_70_85_13_26 ) < 1E-3
def lowerCAmelCase ( self ) -> Optional[int]:
_snake_case = self.scheduler_classes[0]
_snake_case = self.get_scheduler_config()
_snake_case = scheduler_class(**lowerCAmelCase_ , use_karras_sigmas=lowerCAmelCase_ )
scheduler.set_timesteps(self.num_inference_steps , device=lowerCAmelCase_ )
_snake_case = self.dummy_model()
_snake_case = self.dummy_sample_deter.to(lowerCAmelCase_ ) * scheduler.init_noise_sigma
_snake_case = sample.to(lowerCAmelCase_ )
for t in scheduler.timesteps:
_snake_case = scheduler.scale_model_input(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = model(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = output.prev_sample
_snake_case = torch.sum(torch.abs(lowerCAmelCase_ ) )
_snake_case = torch.mean(torch.abs(lowerCAmelCase_ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_76.66_97_41_35_74_21_88 ) < 1E-2
assert abs(result_mean.item() - 0.2_30_03_87_27_30_98_18_11 ) < 1E-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_77.63_65_35_64_45_31_25 ) < 1E-2
assert abs(result_mean.item() - 0.2_30_03_87_27_30_98_18_11 ) < 1E-2
else:
assert abs(result_sum.item() - 1_70.3_13_52_23_38_86_72 ) < 1E-2
assert abs(result_mean.item() - 0.2_30_03_87_27_30_98_18_11 ) < 1E-2
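# The tests above all repeat the canonical diffusers sampling loop; a distilled,
# model-agnostic sketch of that pattern (`scheduler` and `model` are caller-supplied
# stand-ins, not objects defined in this file):
def run_denoising_loop(scheduler, model, initial_noise, num_inference_steps):
    scheduler.set_timesteps(num_inference_steps)
    sample = initial_noise * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        noise_pred = model(model_input, t)
        sample = scheduler.step(noise_pred, t, sample).prev_sample
    return sample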
from cva import destroyAllWindows, imread, imshow, waitKey
def lowerCamelCase__ ( UpperCamelCase__ : Dict ) -> Optional[Any]:
    '''Return the photographic negative of an image: every pixel value v becomes 255 - v.'''
_snake_case , _snake_case = img.shape[0], img.shape[1]
# converting each pixel's color to its negative
for i in range(UpperCamelCase__ ):
for j in range(UpperCamelCase__ ):
_snake_case = [255, 255, 255] - img[i][j]
return img
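# The per-pixel loop above runs in Python; since OpenCV images are NumPy uint8
# arrays, the same negative can be computed in a single vectorized expression
# (a sketch, assuming an 8-bit image):
import numpy as np

def convert_to_negative_vectorized(img: np.ndarray) -> np.ndarray:
    return 255 - img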
if __name__ == "__main__":
# read original image
UpperCAmelCase_ = imread("""image_data/lena.jpg""", 1)
# convert to its negative
UpperCAmelCase_ = convert_to_negative(img)
# show result image
imshow("""negative of original image""", img)
waitKey(0)
destroyAllWindows()
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
UpperCAmelCase_ = {
"""vocab_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/vocab.txt""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/vocab.txt""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"""
),
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"""
),
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt""",
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"""
),
"""bert-base-multilingual-cased""": (
"""https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"""
),
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-cased""": (
"""https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"""
),
},
}
UpperCAmelCase_ = {
"""bert-base-uncased""": 512,
"""bert-large-uncased""": 512,
"""bert-base-cased""": 512,
"""bert-large-cased""": 512,
"""bert-base-multilingual-uncased""": 512,
"""bert-base-multilingual-cased""": 512,
"""bert-base-chinese""": 512,
"""bert-base-german-cased""": 512,
"""bert-large-uncased-whole-word-masking""": 512,
"""bert-large-cased-whole-word-masking""": 512,
"""bert-large-uncased-whole-word-masking-finetuned-squad""": 512,
"""bert-large-cased-whole-word-masking-finetuned-squad""": 512,
"""bert-base-cased-finetuned-mrpc""": 512,
"""bert-base-german-dbmdz-cased""": 512,
"""bert-base-german-dbmdz-uncased""": 512,
"""TurkuNLP/bert-base-finnish-cased-v1""": 512,
"""TurkuNLP/bert-base-finnish-uncased-v1""": 512,
"""wietsedv/bert-base-dutch-cased""": 512,
}
UpperCAmelCase_ = {
"""bert-base-uncased""": {"""do_lower_case""": True},
"""bert-large-uncased""": {"""do_lower_case""": True},
"""bert-base-cased""": {"""do_lower_case""": False},
"""bert-large-cased""": {"""do_lower_case""": False},
"""bert-base-multilingual-uncased""": {"""do_lower_case""": True},
"""bert-base-multilingual-cased""": {"""do_lower_case""": False},
"""bert-base-chinese""": {"""do_lower_case""": False},
"""bert-base-german-cased""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": False},
"""bert-base-cased-finetuned-mrpc""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-cased""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-uncased""": {"""do_lower_case""": True},
"""TurkuNLP/bert-base-finnish-cased-v1""": {"""do_lower_case""": False},
"""TurkuNLP/bert-base-finnish-uncased-v1""": {"""do_lower_case""": True},
"""wietsedv/bert-base-dutch-cased""": {"""do_lower_case""": False},
}
class UpperCamelCase_ ( _lowerCamelCase ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = BertTokenizer
def __init__( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=True , lowerCAmelCase_="[UNK]" , lowerCAmelCase_="[SEP]" , lowerCAmelCase_="[PAD]" , lowerCAmelCase_="[CLS]" , lowerCAmelCase_="[MASK]" , lowerCAmelCase_=True , lowerCAmelCase_=None , **lowerCAmelCase_ , ) -> Tuple:
super().__init__(
lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , do_lower_case=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , tokenize_chinese_chars=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ , **lowerCAmelCase_ , )
_snake_case = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , lowerCAmelCase_ ) != do_lower_case
or normalizer_state.get('strip_accents' , lowerCAmelCase_ ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , lowerCAmelCase_ ) != tokenize_chinese_chars
):
            _snake_case = getattr(normalizers , normalizer_state.pop('type' ) )
_snake_case = do_lower_case
_snake_case = strip_accents
_snake_case = tokenize_chinese_chars
_snake_case = normalizer_class(**lowerCAmelCase_ )
_snake_case = do_lower_case
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=None ) -> Any:
        _snake_case = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> List[int]:
_snake_case = [self.sep_token_id]
_snake_case = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> Tuple[str]:
_snake_case = self._tokenizer.model.save(lowerCAmelCase_ , name=lowerCAmelCase_ )
return tuple(lowerCAmelCase_ )
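# A worked example of the layout the two methods above produce for a sentence pair:
# [CLS] A [SEP] B [SEP], with token_type_ids of 0 over the first segment and 1 over
# the second. The ids 101/102 are the usual bert-base-uncased [CLS]/[SEP] ids.
_cls_id, _sep_id = 101, 102
_a, _b = [7, 8], [9]
_input_ids = [_cls_id] + _a + [_sep_id] + _b + [_sep_id]
_type_ids = [0] * (len(_a) + 2) + [1] * (len(_b) + 1)
assert _input_ids == [101, 7, 8, 102, 9, 102]
assert _type_ids == [0, 0, 0, 0, 1, 1]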
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def lowerCamelCase__ ( UpperCamelCase__ : int ) -> List[str]:
    '''Build a VideoMAEConfig for the given checkpoint name, including the label mapping for fine-tuned models.'''
_snake_case = VideoMAEConfig()
set_architecture_configs(UpperCamelCase__ , UpperCamelCase__ )
if "finetuned" not in model_name:
_snake_case = False
if "finetuned" in model_name:
_snake_case = 'huggingface/label-files'
if "kinetics" in model_name:
_snake_case = 400
_snake_case = 'kinetics400-id2label.json'
elif "ssv2" in model_name:
_snake_case = 174
_snake_case = 'something-something-v2-id2label.json'
else:
raise ValueError('Model name should either contain \'kinetics\' or \'ssv2\' in case it\'s fine-tuned.' )
_snake_case = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type='dataset' ) , 'r' ) )
_snake_case = {int(UpperCamelCase__ ): v for k, v in idalabel.items()}
_snake_case = idalabel
_snake_case = {v: k for k, v in idalabel.items()}
return config
def lowerCamelCase__ ( UpperCamelCase__ : str , UpperCamelCase__ : Dict ) -> int:
    '''Set the architecture hyperparameters (hidden size, depth, heads) according to the size tag in the model name.'''
if "small" in model_name:
_snake_case = 384
_snake_case = 1_536
_snake_case = 12
_snake_case = 16
_snake_case = 12
_snake_case = 3
_snake_case = 192
_snake_case = 768
elif "large" in model_name:
_snake_case = 1_024
_snake_case = 4_096
_snake_case = 24
_snake_case = 16
_snake_case = 12
_snake_case = 8
_snake_case = 512
_snake_case = 2_048
elif "huge" in model_name:
_snake_case = 1_280
_snake_case = 5_120
_snake_case = 32
_snake_case = 16
_snake_case = 12
_snake_case = 8
_snake_case = 640
_snake_case = 2_560
elif "base" not in model_name:
raise ValueError('Model name should include either "small", "base", "large", or "huge"' )
def lowerCamelCase__ ( UpperCamelCase__ : Any ) -> Tuple:
    '''Map an original VideoMAE parameter name to its HuggingFace equivalent.'''
if "encoder." in name:
_snake_case = name.replace('encoder.' , '' )
if "cls_token" in name:
_snake_case = name.replace('cls_token' , 'videomae.embeddings.cls_token' )
if "decoder_pos_embed" in name:
_snake_case = name.replace('decoder_pos_embed' , 'decoder.decoder_pos_embed' )
if "pos_embed" in name and "decoder" not in name:
_snake_case = name.replace('pos_embed' , 'videomae.embeddings.position_embeddings' )
if "patch_embed.proj" in name:
_snake_case = name.replace('patch_embed.proj' , 'videomae.embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
_snake_case = name.replace('patch_embed.norm' , 'videomae.embeddings.norm' )
if "decoder.blocks" in name:
_snake_case = name.replace('decoder.blocks' , 'decoder.decoder_layers' )
if "blocks" in name:
_snake_case = name.replace('blocks' , 'videomae.encoder.layer' )
if "attn.proj" in name:
_snake_case = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name and "bias" not in name:
_snake_case = name.replace('attn' , 'attention.self' )
if "attn" in name:
_snake_case = name.replace('attn' , 'attention.attention' )
if "norm1" in name:
_snake_case = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
_snake_case = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
_snake_case = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
_snake_case = name.replace('mlp.fc2' , 'output.dense' )
if "decoder_embed" in name:
_snake_case = name.replace('decoder_embed' , 'decoder.decoder_embed' )
if "decoder_norm" in name:
_snake_case = name.replace('decoder_norm' , 'decoder.decoder_norm' )
if "decoder_pred" in name:
_snake_case = name.replace('decoder_pred' , 'decoder.decoder_pred' )
if "norm.weight" in name and "decoder" not in name and "fc" not in name:
_snake_case = name.replace('norm.weight' , 'videomae.layernorm.weight' )
if "norm.bias" in name and "decoder" not in name and "fc" not in name:
_snake_case = name.replace('norm.bias' , 'videomae.layernorm.bias' )
if "head" in name and "decoder" not in name:
_snake_case = name.replace('head' , 'classifier' )
return name
def lowerCamelCase__ ( UpperCamelCase__ : Any , UpperCamelCase__ : List[Any] ) -> Union[str, Any]:
    '''Rename every key of the original state dict and split the fused qkv projections into separate q, k, v weights.'''
for key in orig_state_dict.copy().keys():
_snake_case = orig_state_dict.pop(UpperCamelCase__ )
if key.startswith('encoder.' ):
_snake_case = key.replace('encoder.' , '' )
if "qkv" in key:
_snake_case = key.split('.' )
if key.startswith('decoder.blocks' ):
_snake_case = config.decoder_hidden_size
_snake_case = int(key_split[2] )
_snake_case = 'decoder.decoder_layers.'
if "weight" in key:
_snake_case = val[:dim, :]
_snake_case = val[dim : dim * 2, :]
_snake_case = val[-dim:, :]
else:
_snake_case = config.hidden_size
_snake_case = int(key_split[1] )
_snake_case = 'videomae.encoder.layer.'
if "weight" in key:
_snake_case = val[:dim, :]
_snake_case = val[dim : dim * 2, :]
_snake_case = val[-dim:, :]
else:
_snake_case = val
return orig_state_dict
def lowerCamelCase__ ( ) -> Union[str, Any]:
    '''Load the sample "eating spaghetti" video clip used to verify the conversion.'''
_snake_case = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' )
_snake_case = np.load(UpperCamelCase__ )
return list(UpperCamelCase__ )
def lowerCamelCase__ ( UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] ) -> List[Any]:
    '''Copy, rename and verify the weights of an original VideoMAE checkpoint in the HuggingFace format.'''
_snake_case = get_videomae_config(UpperCamelCase__ )
if "finetuned" in model_name:
_snake_case = VideoMAEForVideoClassification(UpperCamelCase__ )
else:
_snake_case = VideoMAEForPreTraining(UpperCamelCase__ )
# download original checkpoint, hosted on Google Drive
_snake_case = 'pytorch_model.bin'
gdown.cached_download(UpperCamelCase__ , UpperCamelCase__ , quiet=UpperCamelCase__ )
_snake_case = torch.load(UpperCamelCase__ , map_location='cpu' )
if "model" in files:
_snake_case = files['model']
else:
_snake_case = files['module']
_snake_case = convert_state_dict(UpperCamelCase__ , UpperCamelCase__ )
model.load_state_dict(UpperCamelCase__ )
model.eval()
# verify model on basic input
_snake_case = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
_snake_case = prepare_video()
_snake_case = image_processor(UpperCamelCase__ , return_tensors='pt' )
if "finetuned" not in model_name:
_snake_case = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos' , filename='bool_masked_pos.pt' )
_snake_case = torch.load(UpperCamelCase__ )
_snake_case = model(**UpperCamelCase__ )
_snake_case = outputs.logits
_snake_case = [
'videomae-small-finetuned-kinetics',
'videomae-small-finetuned-ssv2',
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
'videomae-base-short',
'videomae-base-short-finetuned-kinetics',
'videomae-base',
'videomae-base-finetuned-kinetics',
'videomae-large',
'videomae-large-finetuned-kinetics',
'videomae-huge-finetuned-kinetics',
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
'videomae-base-short-ssv2',
'videomae-base-short-finetuned-ssv2',
'videomae-base-ssv2',
'videomae-base-finetuned-ssv2',
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
_snake_case = torch.Size([1, 400] )
_snake_case = torch.tensor([-0.9291, -0.4061, -0.9307] )
elif model_name == "videomae-small-finetuned-ssv2":
_snake_case = torch.Size([1, 174] )
_snake_case = torch.tensor([0.2671, -0.4689, -0.8235] )
elif model_name == "videomae-base":
_snake_case = torch.Size([1, 1_408, 1_536] )
_snake_case = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]] )
elif model_name == "videomae-base-short":
_snake_case = torch.Size([1, 1_408, 1_536] )
_snake_case = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] )
# we verified the loss both for normalized and unnormalized targets for this one
_snake_case = torch.tensor([0.5142] ) if config.norm_pix_loss else torch.tensor([0.6469] )
elif model_name == "videomae-large":
_snake_case = torch.Size([1, 1_408, 1_536] )
_snake_case = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]] )
elif model_name == "videomae-large-finetuned-kinetics":
_snake_case = torch.Size([1, 400] )
_snake_case = torch.tensor([0.0771, 0.0011, -0.3625] )
elif model_name == "videomae-huge-finetuned-kinetics":
_snake_case = torch.Size([1, 400] )
_snake_case = torch.tensor([0.2433, 0.1632, -0.4894] )
elif model_name == "videomae-base-short-finetuned-kinetics":
_snake_case = torch.Size([1, 400] )
_snake_case = torch.tensor([0.6588, 0.0990, -0.2493] )
elif model_name == "videomae-base-finetuned-kinetics":
_snake_case = torch.Size([1, 400] )
_snake_case = torch.tensor([0.3669, -0.0688, -0.2421] )
elif model_name == "videomae-base-short-ssv2":
_snake_case = torch.Size([1, 1_408, 1_536] )
_snake_case = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
_snake_case = torch.Size([1, 174] )
_snake_case = torch.tensor([-0.0537, -0.1539, -0.3266] )
elif model_name == "videomae-base-ssv2":
_snake_case = torch.Size([1, 1_408, 1_536] )
_snake_case = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]] )
elif model_name == "videomae-base-finetuned-ssv2":
_snake_case = torch.Size([1, 174] )
_snake_case = torch.tensor([0.1961, -0.8337, -0.6389] )
else:
raise ValueError(F'''Model name not supported. Should be one of {model_names}''' )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
assert torch.allclose(logits[0, :3] , UpperCamelCase__ , atol=1e-4 )
else:
print('Logits:' , logits[0, :3, :3] )
assert torch.allclose(logits[0, :3, :3] , UpperCamelCase__ , atol=1e-4 )
print('Logits ok!' )
# verify loss, if applicable
if model_name == "videomae-base-short":
_snake_case = outputs.loss
assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-4 )
print('Loss ok!' )
if pytorch_dump_folder_path is not None:
print(F'''Saving model and image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(UpperCamelCase__ )
model.save_pretrained(UpperCamelCase__ )
if push_to_hub:
print('Pushing to the hub...' )
model.push_to_hub(UpperCamelCase__ , organization='nielsr' )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4""",
type=str,
help=(
"""URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"""
""" download link."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/Users/nielsrogge/Documents/VideoMAE/Test""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--model_name""", default="""videomae-base""", type=str, help="""Name of the model.""")
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
UpperCAmelCase_ = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
UpperCAmelCase_ = logging.getLogger(__name__)
class UpperCamelCase_ ( _lowerCamelCase ):
    def __init__( self , config , question_encoder_tokenizer , generator_tokenizer , index=None ) -> None:
        super().__init__(
            config , question_encoder_tokenizer=question_encoder_tokenizer , generator_tokenizer=generator_tokenizer , index=index , init_retrieval=False , )
        self.process_group = None
    def init_retrieval( self , distributed_port ) -> None:
        logger.info('initializing retrieval' )
        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info('dist initialized' )
            # needs to be set manually
            os.environ['GLOO_SOCKET_IFNAME'] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ['MASTER_PORT'] = str(distributed_port + 1 )
            self.process_group = dist.new_group(ranks=None , backend='gloo' )
        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info('dist not initialized / main' )
            self.index.init_index()
        # all processes wait until the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group )
    def _is_main( self ) -> bool:
        return dist.get_rank(group=self.process_group ) == 0
    def _scattered( self , scatter_list , target_shape , target_type=torch.float32 ) -> List[str]:
        target_tensor = torch.empty(target_shape , dtype=target_type )
        dist.scatter(target_tensor , src=0 , scatter_list=scatter_list , group=self.process_group )
        return target_tensor
    def _infer_socket_ifname( self ):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith('e' )) , None )
        return ifname
    def retrieve( self , question_hidden_states: np.ndarray , n_docs: int ) -> Tuple[np.ndarray, List[dict]]:
        # single GPU training
        if not dist.is_initialized():
            doc_ids , retrieved_doc_embeds = self._main_retrieve(question_hidden_states , n_docs )
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids )
        # distributed training
        world_size = dist.get_world_size(group=self.process_group )
        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape , dtype=torch.float32 ) for _ in range(world_size )]
        dist.gather(torch.tensor(question_hidden_states ) , dst=0 , gather_list=gather_list , group=self.process_group )
        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list ) == world_size
            ids , vectors = self._main_retrieve(torch.cat(gather_list ).numpy() , n_docs )
            ids , vectors = torch.tensor(ids ), torch.tensor(vectors )
            scatter_ids = self._chunk_tensor(ids , n_queries )
            scatter_vectors = self._chunk_tensor(vectors , n_queries )
        doc_ids = self._scattered(scatter_ids , [n_queries, n_docs] , target_type=torch.int64 )
        retrieved_doc_embeds = self._scattered(scatter_vectors , [n_queries, n_docs, question_hidden_states.shape[1]] )
        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids )
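# Hedged sketch of the round trip above: every worker calls
# retrieve(question_hidden_states, n_docs); rank 0 gathers all hidden states,
# queries the index once, then scatters a (n_queries, n_docs) tensor of doc ids
# and a (n_queries, n_docs, dim) tensor of embeddings back to each worker.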
| 295
|
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__ )
# General docstring
_CONFIG_FOR_DOC = """ResNetConfig"""
# Base docstring
_CHECKPOINT_FOR_DOC = """microsoft/resnet-50"""
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = """microsoft/resnet-50"""
_IMAGE_CLASS_EXPECTED_OUTPUT = """tiger cat"""
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    """microsoft/resnet-50""",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class UpperCamelCase_ ( nn.Module ):
    def __init__( self , in_channels: int , out_channels: int , kernel_size: int = 3 , stride: int = 1 , activation: str = "relu" ) -> None:
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels , out_channels , kernel_size=kernel_size , stride=stride , padding=kernel_size // 2 , bias=False )
        self.normalization = nn.BatchNorm2d(out_channels )
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()
    def lowerCAmelCase ( self , hidden_state ) -> Tensor:
        hidden_state = self.convolution(hidden_state )
        hidden_state = self.normalization(hidden_state )
        hidden_state = self.activation(hidden_state )
        return hidden_state
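# Shape example for the conv layer above: with in_channels=3, out_channels=64,
# kernel_size=7, stride=2, an input of (batch, 3, 224, 224) becomes
# (batch, 64, 112, 112), since padding = 7 // 2 = 3 and
# (224 + 2 * 3 - 7) // 2 + 1 = 112.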
class UpperCamelCase_ ( nn.Module ):
    def __init__( self , config ) -> None:
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
        self.pooler = nn.MaxPool2d(kernel_size=3 , stride=2 , padding=1 )
        self.num_channels = config.num_channels
    def lowerCAmelCase ( self , pixel_values: Tensor ) -> Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
        embedding = self.embedder(pixel_values )
        embedding = self.pooler(embedding )
        return embedding
class UpperCamelCase_ ( nn.Module ):
    def __init__( self , in_channels: int , out_channels: int , stride: int = 2 ) -> None:
        super().__init__()
        self.convolution = nn.Conv2d(in_channels , out_channels , kernel_size=1 , stride=stride , bias=False )
        self.normalization = nn.BatchNorm2d(out_channels )
    def lowerCAmelCase ( self , hidden_state ) -> Tensor:
        hidden_state = self.convolution(hidden_state )
        hidden_state = self.normalization(hidden_state )
        return hidden_state
class UpperCamelCase_ ( nn.Module ):
    def __init__( self , in_channels: int , out_channels: int , stride: int = 1 , activation: str = "relu" ) -> None:
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels , out_channels , stride=stride ) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels , out_channels , stride=stride ) , ResNetConvLayer(out_channels , out_channels , activation=None ) , )
        self.activation = ACT2FN[activation]
    def lowerCAmelCase ( self , hidden_state ) -> Tensor:
        residual = hidden_state
        hidden_state = self.layer(hidden_state )
        residual = self.shortcut(residual )
        hidden_state += residual
        hidden_state = self.activation(hidden_state )
        return hidden_state
class UpperCamelCase_ ( nn.Module ):
    def __init__( self , in_channels: int , out_channels: int , stride: int = 1 , activation: str = "relu" , reduction: int = 4 ) -> None:
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels , out_channels , stride=stride ) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels , reduces_channels , kernel_size=1 ) , ResNetConvLayer(reduces_channels , reduces_channels , stride=stride ) , ResNetConvLayer(reduces_channels , out_channels , kernel_size=1 , activation=None ) , )
        self.activation = ACT2FN[activation]
    def lowerCAmelCase ( self , hidden_state ) -> Tensor:
        residual = hidden_state
        hidden_state = self.layer(hidden_state )
        residual = self.shortcut(residual )
        hidden_state += residual
        hidden_state = self.activation(hidden_state )
        return hidden_state
class UpperCamelCase_ ( nn.Module ):
    def __init__( self , config: ResNetConfig , in_channels: int , out_channels: int , stride: int = 2 , depth: int = 2 , ) -> None:
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == 'bottleneck' else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels , out_channels , stride=stride , activation=config.hidden_act ) , *[layer(out_channels , out_channels , activation=config.hidden_act ) for _ in range(depth - 1 )] , )
    def lowerCAmelCase ( self , input: Tensor ) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state )
        return hidden_state
class UpperCamelCase_ ( nn.Module ):
    def __init__( self , config ) -> None:
        super().__init__()
        self.stages = nn.ModuleList([] )
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
lowerCAmelCase_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
        in_out_channels = zip(config.hidden_sizes , config.hidden_sizes[1:] )
        for (in_channels, out_channels), depth in zip(in_out_channels , config.depths[1:] ):
            self.stages.append(ResNetStage(config , in_channels , out_channels , depth=depth ) )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = False , lowerCAmelCase_ = True ) -> BaseModelOutputWithNoAttention:
_snake_case = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_snake_case = hidden_states + (hidden_state,)
_snake_case = stage_module(lowerCAmelCase_ )
if output_hidden_states:
_snake_case = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(
last_hidden_state=lowerCAmelCase_ , hidden_states=lowerCAmelCase_ , )
class UpperCamelCase_ ( _lowerCamelCase ):
    config_class = ResNetConfig
    base_model_prefix = '''resnet'''
    main_input_name = '''pixel_values'''
    supports_gradient_checkpointing = True
    def lowerCAmelCase ( self , module ) -> None:
        if isinstance(module , nn.Conv2d ):
            nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' )
        elif isinstance(module , (nn.BatchNorm2d, nn.GroupNorm) ):
            nn.init.constant_(module.weight , 1 )
            nn.init.constant_(module.bias , 0 )
    def lowerCAmelCase ( self , module , value=False ) -> None:
        if isinstance(module , ResNetEncoder ):
            module.gradient_checkpointing = value
RESNET_START_DOCSTRING = R"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
RESNET_INPUTS_DOCSTRING = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    '''The bare ResNet model outputting raw features without any specific head on top.''' , RESNET_START_DOCSTRING , )
class UpperCamelCase_ ( _lowerCamelCase ):
    def __init__( self , config ) -> None:
        super().__init__(config )
        self.config = config
        self.embedder = ResNetEmbeddings(config )
        self.encoder = ResNetEncoder(config )
        self.pooler = nn.AdaptiveAvgPool2d((1, 1) )
# Initialize weights and apply final processing
self.post_init()
    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING )
@add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=BaseModelOutputWithPoolingAndNoAttention , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None ) -> BaseModelOutputWithPoolingAndNoAttention:
_snake_case = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_snake_case = return_dict if return_dict is not None else self.config.use_return_dict
_snake_case = self.embedder(lowerCAmelCase_ )
_snake_case = self.encoder(
lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , return_dict=lowerCAmelCase_ )
_snake_case = encoder_outputs[0]
_snake_case = self.pooler(lowerCAmelCase_ )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowerCAmelCase_ , pooler_output=lowerCAmelCase_ , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
'''
ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
    ''' , RESNET_START_DOCSTRING , )
class UpperCamelCase_ ( _lowerCamelCase ):
    def __init__( self , config ) -> None:
        super().__init__(config )
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config )
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING )
@add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=ImageClassifierOutputWithNoAttention , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def lowerCAmelCase ( self , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , ) -> ImageClassifierOutputWithNoAttention:
_snake_case = return_dict if return_dict is not None else self.config.use_return_dict
_snake_case = self.resnet(lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , return_dict=lowerCAmelCase_ )
_snake_case = outputs.pooler_output if return_dict else outputs[1]
_snake_case = self.classifier(lowerCAmelCase_ )
_snake_case = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_snake_case = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_snake_case = 'single_label_classification'
else:
_snake_case = 'multi_label_classification'
if self.config.problem_type == "regression":
_snake_case = MSELoss()
if self.num_labels == 1:
_snake_case = loss_fct(logits.squeeze() , labels.squeeze() )
else:
_snake_case = loss_fct(lowerCAmelCase_ , lowerCAmelCase_ )
elif self.config.problem_type == "single_label_classification":
_snake_case = CrossEntropyLoss()
_snake_case = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_snake_case = BCEWithLogitsLoss()
_snake_case = loss_fct(lowerCAmelCase_ , lowerCAmelCase_ )
if not return_dict:
_snake_case = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=lowerCAmelCase_ , logits=lowerCAmelCase_ , hidden_states=outputs.hidden_states )
@add_start_docstrings(
'''
ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    ''' , RESNET_START_DOCSTRING , )
class UpperCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
    def __init__( self , config ) -> None:
        super().__init__(config )
        super()._init_backbone(config )
        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config )
        self.encoder = ResNetEncoder(config )
# initialize weights and apply final processing
self.post_init()
    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING )
    @replace_return_docstrings(output_type=BackboneOutput , config_class=_CONFIG_FOR_DOC )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None ) -> BackboneOutput:
_snake_case = return_dict if return_dict is not None else self.config.use_return_dict
_snake_case = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_snake_case = self.embedder(lowerCAmelCase_ )
_snake_case = self.encoder(lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , return_dict=lowerCAmelCase_ )
_snake_case = outputs.hidden_states
_snake_case = ()
for idx, stage in enumerate(self.stage_names ):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
_snake_case = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=lowerCAmelCase_ , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=lowerCAmelCase_ , )
| 295
| 1
|
def valid_connection(
    graph: list[list[int]] , next_ver: int , curr_ind: int , path: list[int] ) -> bool:
    '''Checks whether it is possible to add next_ver into path by validating two statements'''
    # 1. Validate that the graph has an edge between the current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False
    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path )
def util_hamilton_cycle( graph: list[list[int]] , path: list[int] , curr_ind: int ) -> bool:
    '''Pseudo-recursive helper of the Hamiltonian cycle search'''
    # Base Case: all vertices placed, check the edge back to the start
    if curr_ind == len(graph ):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1
    # Recursive Step
    for next_ver in range(0 , len(graph ) ):
        if valid_connection(graph , next_ver , curr_ind , path ):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph , path , curr_ind + 1 ):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False
def hamilton_cycle( graph: list[list[int]] , start_index: int = 0 ) -> list[int]:
    '''Wrapper that searches for a Hamiltonian cycle starting at start_index'''
    # Initialize path with -1, indicating vertices that have not been visited yet
    path = [-1] * (len(graph ) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate; if a cycle is found return the path, otherwise return an empty list
    return path if util_hamilton_cycle(graph , path , 1 ) else []
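# Worked example (traced by hand against the backtracking above):
# >>> graph = [[0, 1, 0, 1, 0],
# ...          [1, 0, 1, 1, 1],
# ...          [0, 1, 0, 0, 1],
# ...          [1, 1, 0, 0, 1],
# ...          [0, 1, 1, 1, 0]]
# >>> hamilton_cycle(graph)
# [0, 1, 2, 4, 3, 0]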
| 295
|
def valid_connection(
    graph: list[list[int]] , next_ver: int , curr_ind: int , path: list[int] ) -> bool:
    '''Checks whether it is possible to add next_ver into path by validating two statements'''
    # 1. Validate that the graph has an edge between the current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False
    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path )
def util_hamilton_cycle( graph: list[list[int]] , path: list[int] , curr_ind: int ) -> bool:
    '''Pseudo-recursive helper of the Hamiltonian cycle search'''
    # Base Case: all vertices placed, check the edge back to the start
    if curr_ind == len(graph ):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1
    # Recursive Step
    for next_ver in range(0 , len(graph ) ):
        if valid_connection(graph , next_ver , curr_ind , path ):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph , path , curr_ind + 1 ):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False
def hamilton_cycle( graph: list[list[int]] , start_index: int = 0 ) -> list[int]:
    '''Wrapper that searches for a Hamiltonian cycle starting at start_index'''
    # Initialize path with -1, indicating vertices that have not been visited yet
    path = [-1] * (len(graph ) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate; if a cycle is found return the path, otherwise return an empty list
    return path if util_hamilton_cycle(graph , path , 1 ) else []
| 295
| 1
|
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel
logger = logging.getLogger(__name__ )
def save_model( model , dirpath ):
    '''Save the model, removing any stale config/weights already in dirpath'''
    if os.path.exists(dirpath ):
        if os.path.exists(os.path.join(dirpath , 'config.json' ) ) and os.path.isfile(
            os.path.join(dirpath , 'config.json' ) ):
            os.remove(os.path.join(dirpath , 'config.json' ) )
        if os.path.exists(os.path.join(dirpath , 'pytorch_model.bin' ) ) and os.path.isfile(
            os.path.join(dirpath , 'pytorch_model.bin' ) ):
            os.remove(os.path.join(dirpath , 'pytorch_model.bin' ) )
    else:
        os.makedirs(dirpath )
    model.save_pretrained(dirpath )
def entropy( p , unlogit=False ):
    '''Compute the entropy of a distribution along its last dimension'''
    exponent = 2
    if unlogit:
        p = torch.pow(p , exponent )
    plogp = p * torch.log(p )
    plogp[p == 0] = 0  # convention: 0 * log(0) = 0
    return -plogp.sum(dim=-1 )
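# e.g. for a uniform two-way distribution, entropy(torch.tensor([0.5, 0.5]))
# returns -2 * 0.5 * ln(0.5) = ln(2) ~= 0.6931.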
def print_ad_tensor( tensor ):
    '''Print a 2D tensor, one tab-separated row per layer'''
    logger.info('lv, h >\t' + '\t'.join(F'''{x + 1}''' for x in range(len(tensor ) ) ) )
    for row in range(len(tensor ) ):
if tensor.dtype != torch.long:
logger.info(F'''layer {row + 1}:\t''' + '\t'.join(F'''{x:.5f}''' for x in tensor[row].cpu().data ) )
else:
logger.info(F'''layer {row + 1}:\t''' + '\t'.join(F'''{x:d}''' for x in tensor[row].cpu().data ) )
def compute_heads_importance(
    args , model , eval_dataloader , compute_entropy=True , compute_importance=True , head_mask=None , actually_pruned=False ):
    '''Compute head attention entropy and gradient-based importance scores over eval_dataloader'''
    # Prepare our tensors
    n_layers , n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers , n_heads ).to(args.device )
    attn_entropy = torch.zeros(n_layers , n_heads ).to(args.device )
    if head_mask is None:
        head_mask = torch.ones(n_layers , n_heads ).to(args.device )
    head_mask.requires_grad_(requires_grad=True )
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None
    total_loss = 0.0
    tot_tokens = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader , desc='Iteration' , disable=args.local_rank not in [-1, 0] ) ):
        inputs = tuple(t.to(args.device ) for t in inputs )
        (input_ids , ) = inputs
        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids , labels=input_ids , head_mask=head_mask )
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss , _ , all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions ):
                masked_entropy = entropy(attn.detach() , True )
                attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids ).float().detach().sum().data
    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance , exponent ).sum(-1 ) , 1 / exponent )
        head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20
    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
    # Print matrices
    if compute_entropy:
        logger.info('Attention entropies' )
        print_ad_tensor(attn_entropy )
    if compute_importance:
        logger.info('Head importance scores' )
        print_ad_tensor(head_importance )
    logger.info('Head ranked by importance scores' )
    head_ranks = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
    head_ranks[head_importance.view(-1 ).sort(descending=True )[1]] = torch.arange(
        head_importance.numel() , device=args.device )
    head_ranks = head_ranks.view_as(head_importance )
    print_ad_tensor(head_ranks )
    return attn_entropy, head_importance, total_loss
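# The importance proxy accumulated above is |d loss / d mask_h| per head — the
# gradient-based score of Michel et al. (2019), "Are Sixteen Heads Really
# Better than One?" — normalized per layer and globally before returning.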
def mask_heads( args , model , eval_dataloader ):
    '''Iteratively mask (zero out) the least important heads, as in Michel et al. (http://arxiv.org/abs/1905.10650)'''
    _ , head_importance , loss = compute_heads_importance(args , model , eval_dataloader , compute_entropy=False )
    original_score = 1 / loss  # instead of downstream score use the LM loss
    logger.info('Pruning: original score: %f, threshold: %f' , original_score , original_score * args.masking_threshold )
    new_head_mask = torch.ones_like(head_importance )
    num_to_mask = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float('Inf' )
        current_heads_to_mask = head_importance.view(-1 ).sort()[1]
        if len(current_heads_to_mask ) <= num_to_mask:
            print('BREAK BY num_to_mask' )
            break
        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info('Heads to mask: %s' , str(current_heads_to_mask.tolist() ) )
        new_head_mask = new_head_mask.view(-1 )
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask )
        new_head_mask = new_head_mask.clone().detach()
        print_ad_tensor(new_head_mask )
        # Compute metric and head importance again
        _ , head_importance , loss = compute_heads_importance(
            args , model , eval_dataloader , compute_entropy=False , head_mask=new_head_mask )
        current_score = 1 / loss
        logger.info(
            'Masking: current score: %f, remaining heads %d (%.1f percents)' , current_score , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , )
    logger.info('Final head mask' )
    print_ad_tensor(head_mask )
    np.save(os.path.join(args.output_dir , 'head_mask.npy' ) , head_mask.detach().cpu().numpy() )
    return head_mask
def prune_heads( args , model , eval_dataloader , head_mask ):
    '''Actually remove the masked heads' weights and measure the speedup'''
    # Pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _ , _ , loss = compute_heads_importance(
        args , model , eval_dataloader , compute_entropy=False , compute_importance=False , head_mask=head_mask )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time
    original_num_params = sum(p.numel() for p in model.parameters() )
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask ) )
    }
    for k, v in heads_to_prune.items():
        if isinstance(v , int ):
            heads_to_prune[k] = [
                v,
            ]
    assert sum(len(h ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune )
    pruned_num_params = sum(p.numel() for p in model.parameters() )
    before_time = datetime.now()
    _ , _ , loss = compute_heads_importance(
        args , model , eval_dataloader , compute_entropy=False , compute_importance=False , head_mask=None , actually_pruned=True , )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time
    logger.info(
        'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)' , original_num_params , pruned_num_params , pruned_num_params / original_num_params * 100 , )
    logger.info('Pruning: score with masking: %f score with pruning: %f' , score_masking , score_pruning )
    logger.info('Pruning: speed ratio (original timing / new timing): %f percents' , original_time / new_time * 100 )
    save_model(model , args.output_dir )
def main():
    '''Entry point: parse arguments, set up the device, load GPT-2 and run head masking/pruning'''
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--data_dir' , default=None , type=str , required=True , help='The input data dir. Should contain the .tsv files (or other data files) for the task.' , )
    parser.add_argument(
        '--model_name_or_path' , default=None , type=str , required=True , help='Path to pretrained model or model identifier from huggingface.co/models' , )
    parser.add_argument(
        '--output_dir' , default=None , type=str , required=True , help='The output directory where the model predictions and checkpoints will be written.' , )
    # Other parameters
    parser.add_argument(
        '--config_name' , default='' , type=str , help='Pretrained config name or path if not the same as model_name_or_path' , )
    parser.add_argument(
        '--tokenizer_name' , default='' , type=str , help='Pretrained tokenizer name or path if not the same as model_name_or_path' , )
    parser.add_argument(
        '--cache_dir' , default=None , type=str , help='Where do you want to store the pre-trained models downloaded from s3' , )
    parser.add_argument(
        '--data_subset' , type=int , default=-1 , help='If > 0: limit the data to a subset of data_subset instances.' )
    parser.add_argument(
        '--overwrite_output_dir' , action='store_true' , help='Whether to overwrite data in output directory' )
    parser.add_argument(
        '--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
    parser.add_argument(
        '--dont_normalize_importance_by_layer' , action='store_true' , help='Don\'t normalize importance score by layers' )
    parser.add_argument(
        '--dont_normalize_global_importance' , action='store_true' , help='Don\'t normalize all importance scores between 0 and 1' , )
    parser.add_argument(
        '--try_masking' , action='store_true' , help='Whether to try to mask head until a threshold of accuracy.' )
    parser.add_argument(
        '--masking_threshold' , default=0.9 , type=float , help='masking threshold in term of metrics (stop masking when metric < threshold * original metric value).' , )
    parser.add_argument(
        '--masking_amount' , default=0.1 , type=float , help='Amount to heads to masking at each masking step.' )
    parser.add_argument('--metric_name' , default='acc' , type=str , help='Metric to use for head masking.' )
    parser.add_argument(
        '--max_seq_length' , default=128 , type=int , help=(
            'The maximum total input sequence length after WordPiece tokenization. \n'
            'Sequences longer than this will be truncated, sequences shorter padded.'
        ) , )
    parser.add_argument('--batch_size' , default=1 , type=int , help='Batch size.' )
    parser.add_argument('--seed' , type=int , default=42 )
    parser.add_argument('--local_rank' , type=int , default=-1 , help='local_rank for distributed training on gpus' )
    parser.add_argument('--no_cuda' , action='store_true' , help='Whether not to use CUDA when available' )
    parser.add_argument('--server_ip' , type=str , default='' , help='Can be used for distant debugging.' )
    parser.add_argument('--server_port' , type=str , default='' , help='Can be used for distant debugging.' )
    args = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=True )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu' )
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank )
        args.device = torch.device('cuda' , args.local_rank )
        args.n_gpu = 1
torch.distributed.init_process_group(backend='nccl' ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=True )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model )
# Print/save training arguments
    os.makedirs(args.output_dir , exist_ok=True )
    torch.save(args , os.path.join(args.output_dir , 'run_args.bin' ) )
    logger.info('Training/evaluation parameters %s' , args )
# Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir , dtype=np.int64 ),
        ] )
    train_tensor_dataset = (torch.from_numpy(numpy_data ),)
    train_data = TensorDataset(*train_tensor_dataset )
    train_sampler = RandomSampler(train_data )
    eval_dataloader = DataLoader(train_data , sampler=train_sampler , batch_size=args.batch_size )
    # Compute head entropy and importance score
    compute_heads_importance(args , model , eval_dataloader )
    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args , model , eval_dataloader )
        prune_heads(args , model , eval_dataloader , head_mask )
if __name__ == "__main__":
main()
| 295
|
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original( checkpoint_path , config_path , output_path ):
    '''Convert an original LDM checkpoint into a diffusers LDMPipeline'''
    config = OmegaConf.load(config_path )
    state_dict = torch.load(checkpoint_path , map_location='cpu' )['model']
    keys = list(state_dict.keys() )
    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = 'first_stage_model.'
    for key in keys:
        if key.startswith(first_stage_key ):
            first_stage_dict[key.replace(first_stage_key , '' )] = state_dict[key]
    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = 'model.diffusion_model.'
    for key in keys:
        if key.startswith(unet_key ):
            unet_state_dict[key.replace(unet_key , '' )] = state_dict[key]
    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params
    vqvae = VQModel(**vqvae_init_args ).eval()
    vqvae.load_state_dict(first_stage_dict )
    unet = UNetLDMModel(**unet_init_args ).eval()
    unet.load_state_dict(unet_state_dict )
    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps , beta_schedule='scaled_linear' , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=False , )
    pipeline = LDMPipeline(vqvae , unet , noise_scheduler )
    pipeline.save_pretrained(output_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", type=str, required=True)
parser.add_argument("""--config_path""", type=str, required=True)
parser.add_argument("""--output_path""", type=str, required=True)
    args = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
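# Example invocation (hedged: the script file name is a placeholder):
#   python convert_ldm_original.py --checkpoint_path model.ckpt \
#       --config_path config.yaml --output_path ./ldm-pipeline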
| 295
| 1
|
import random
def random_graph( vertices_number: int , probability: float , directed: bool = False ) -> dict:
    '''Generate a random graph where each possible edge appears with the given probability'''
    graph: dict = {i: [] for i in range(vertices_number )}
    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number )
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph
    # for each pair of nodes (i, j), add an edge from i to j if the randomly
    # generated number is lower than the given probability
    for i in range(vertices_number ):
        for j in range(i + 1 , vertices_number ):
            if random.random() < probability:
                graph[i].append(j )
                if not directed:
                    # if the graph is undirected, also add the reverse edge from j to i
                    graph[j].append(i )
    return graph
def complete_graph( vertices_number: int ) -> dict:
    '''Generate a complete graph on vertices_number nodes'''
    return {
        i: [j for j in range(vertices_number ) if i != j] for i in range(vertices_number )
    }
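# Worked example for the deterministic branch:
# >>> complete_graph(3)
# {0: [1, 2], 1: [0, 2], 2: [0, 1]}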
if __name__ == "__main__":
import doctest
doctest.testmod()
| 295
|
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UpperCamelCase_ :
@property
def lowerCAmelCase ( self ) -> int:
return self.get_dummy_input()
@property
def lowerCAmelCase ( self ) -> Optional[Any]:
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(F'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' )
    def get_dummy_input( self , include_temb=True , include_res_hidden_states_tuple=False , include_encoder_hidden_states=False , include_skip_sample=False , ):
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)
        generator = torch.manual_seed(0 )
        device = torch.device(torch_device )
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape , generator=generator , device=device )
        dummy_input = {'hidden_states': hidden_states}
        if include_temb:
            temb_channels = 128
            dummy_input['temb'] = randn_tensor((batch_size, temb_channels) , generator=generator , device=device )
        if include_res_hidden_states_tuple:
            generator_a = torch.manual_seed(1 )
            dummy_input['res_hidden_states_tuple'] = (randn_tensor(shape , generator=generator_a , device=device ),)
        if include_encoder_hidden_states:
            dummy_input['encoder_hidden_states'] = floats_tensor((batch_size, 32, 32) ).to(torch_device )
        if include_skip_sample:
            dummy_input['skip_sample'] = randn_tensor(((batch_size, 3) + sizes) , generator=generator , device=device )
        return dummy_input
def lowerCAmelCase ( self ) -> Tuple:
_snake_case = {
'in_channels': 32,
'out_channels': 32,
'temb_channels': 128,
}
if self.block_type == "up":
_snake_case = 32
if self.block_type == "mid":
init_dict.pop('out_channels' )
_snake_case = self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Optional[int]:
_snake_case , _snake_case = self.prepare_init_args_and_inputs_for_common()
_snake_case = self.block_class(**lowerCAmelCase_ )
unet_block.to(lowerCAmelCase_ )
unet_block.eval()
with torch.no_grad():
_snake_case = unet_block(**lowerCAmelCase_ )
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case = output[0]
self.assertEqual(output.shape , self.output_shape )
_snake_case = output[0, -1, -3:, -3:]
_snake_case = torch.tensor(lowerCAmelCase_ ).to(lowerCAmelCase_ )
assert torch_all_close(output_slice.flatten() , lowerCAmelCase_ , atol=5E-3 )
@unittest.skipIf(torch_device == 'mps' , 'Training is not supported in mps' )
def lowerCAmelCase ( self ) -> Tuple:
_snake_case , _snake_case = self.prepare_init_args_and_inputs_for_common()
_snake_case = self.block_class(**lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.train()
_snake_case = model(**lowerCAmelCase_ )
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case = output[0]
_snake_case = torch.device(lowerCAmelCase_ )
_snake_case = randn_tensor(output.shape , device=lowerCAmelCase_ )
_snake_case = torch.nn.functional.mse_loss(lowerCAmelCase_ , lowerCAmelCase_ )
loss.backward()
| 295
| 1
|
import os
from collections.abc import Iterator
def good_file_paths( top_dir: str = "." ) -> Iterator[str]:
    '''Yield the paths of all .py / .ipynb files under top_dir, skipping private and scripts dirs'''
    for dir_path, dir_names, filenames in os.walk(top_dir ):
        dir_names[:] = [d for d in dir_names if d != 'scripts' and d[0] not in '._']
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename )[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path , filename ).lstrip('./' )
def md_prefix( i: int ) -> str:
    '''Markdown prefix for nesting level i'''
    return F'''{i * "  "}*''' if i else "\n##"
def print_path( old_path: str , new_path: str ) -> str:
    '''Print markdown headers for the directory components that are new in new_path'''
    old_parts = old_path.split(os.sep )
    for i, new_part in enumerate(new_path.split(os.sep ) ):
        if (i + 1 > len(old_parts ) or old_parts[i] != new_part) and new_part:
            print(F'''{md_prefix(i )} {new_part.replace("_" , " " ).title()}''' )
    return new_path
def print_directory_md( top_dir: str = "." ) -> None:
    '''Print a markdown table of contents for the files under top_dir'''
    old_path = ''
    for filepath in sorted(good_file_paths(top_dir ) ):
        filepath , filename = os.path.split(filepath )
        if filepath != old_path:
            old_path = print_path(old_path , filepath )
        indent = (filepath.count(os.sep ) + 1) if filepath else 0
        url = F'''{filepath}/{filename}'''.replace(' ' , '%20' )
        filename = os.path.splitext(filename.replace('_' , ' ' ).title() )[0]
        print(F'''{md_prefix(indent )} [{filename}]({url})''' )
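# e.g. md_prefix(0) == "\n##" (a new top-level header), while md_prefix(2)
# yields "    *", a bullet indented two levels deep.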
if __name__ == "__main__":
print_directory_md(""".""")
| 295
|
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def lowerCamelCase__ ( ) -> List[str]:
'''simple docstring'''
    num_nodes , num_edges = 9, 14  # noqa: F841
    edges = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
    adjancency = defaultdict(list )
    for node_a, node_b, cost in edges:
        adjancency[node_a].append([node_b, cost] )
        adjancency[node_b].append([node_a, cost] )
    result = mst(adjancency )
    expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
    for answer in expected:
        edge = tuple(answer[:2] )
        reverse = tuple(edge[::-1] )
        assert edge in result or reverse in result
| 295
| 1
|
from __future__ import annotations
def depth_first_search(
    possible_board: list[int] , diagonal_right_collisions: list[int] , diagonal_left_collisions: list[int] , boards: list[list[str]] , n: int , ) -> None:
    '''Place queens row by row, pruning vertical and diagonal collisions'''
    # Get next row in the current board (possible_board) to fill it with a queen
    row = len(possible_board )
# If row is equal to the size of the board it means there are a queen in each row in
# the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append(['. ' * i + 'Q ' + '. ' * (n - 1 - i) for i in possible_board] )
return
# We iterate each column in the row to find all possible results in each row
    for col in range(n ):
# We apply that we learned previously. First we check that in the current board
# (possible_board) there are not other same value because if there is it means
# that there are a collision in vertical. Then we apply the two formulas we
# learned before:
#
# 45º: y - x = b or 45: row - col = b
# 135º: y + x = b or row + col = b.
#
# And we verify if the results of this two formulas not exist in their variables
# respectively. (diagonal_right_collisions, diagonal_left_collisions)
#
# If any or these are True it means there is a collision so we continue to the
# next value in the for loop.
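        # Example: with a queen already at (row=1, col=3), a candidate at
        # (row=3, col=1) is rejected because 1 + 3 == 3 + 1 puts both queens
        # on the same 135º diagonal (tracked in diagonal_left_collisions).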
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
# If it is False we call dfs function again and we update the inputs
depth_first_search(
            [*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , boards , n , )
def n_queens_solution( n: int ) -> None:
    '''Collect and print every board that solves the n-queens problem'''
    boards: list[list[str]] = []
    depth_first_search([] , [] , [] , boards , n )
# Print all the boards
    for board in boards:
        for column in board:
            print(column )
        print('' )
    print(len(boards ) , 'solutions were found.' )
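# For n = 4 the search finds exactly 2 boards, e.g.
# ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . '] and its mirror image.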
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
| 295
|
from collections.abc import Sequence
def max_subarray_sum( arr: Sequence[float] , allow_empty_subarrays: bool = False ) -> float:
    '''Kadane's algorithm: return the best sum over contiguous subarrays of arr'''
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float('-inf' )
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num , curr_sum + num )
        max_sum = max(max_sum , curr_sum )
    return max_sum
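# e.g. max_subarray_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6 (the subarray
# [4, -1, 2, 1]); with allow_empty_subarrays=True an all-negative input such
# as [-1, -2] yields 0 instead of -1.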
if __name__ == "__main__":
from doctest import testmod
testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(F"{max_subarray_sum(nums) = }")
| 295
| 1
|
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class UpperCamelCase_ ( unittest.TestCase ):
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = RoFormerConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
        return config, inputs_dict
@require_flax
class UpperCamelCase_ ( _lowerCamelCase , unittest.TestCase ):
    test_head_masking = True
    all_model_classes = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCAmelCase ( self ) -> Any:
        self.model_tester = FlaxRoFormerModelTester(self )
@slow
def lowerCAmelCase ( self ) -> List[Any]:
for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('junnyu/roformer_chinese_small' , from_pt=True )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
@require_flax
class UpperCamelCase_ ( unittest.TestCase ):
@slow
def lowerCAmelCase ( self ) -> str:
        model = FlaxRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' )
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        vocab_size = 5_0000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape , expected_shape )
        expected_slice = jnp.array(
            [[[-0.12_05, -1.02_65, 0.29_22], [-1.51_34, 0.19_74, 0.15_19], [-5.01_35, -3.90_03, -0.84_04]]] )
        self.assertTrue(jnp.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
| 295
|
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class UpperCamelCase_ ( enum.Enum ):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2
@add_end_docstrings(_lowerCamelCase )
class UpperCamelCase_ ( _lowerCamelCase ):
    XL_PREFIX = '''
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
'''
    def __init__( self , *args , **kwargs ) -> None:
        super().__init__(*args , **kwargs )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == 'tf' else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
_snake_case = None
if self.model.config.prefix is not None:
_snake_case = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
_snake_case = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
_snake_case , _snake_case , _snake_case = self._sanitize_parameters(prefix=lowerCAmelCase_ , **self._forward_params )
_snake_case = {**self._preprocess_params, **preprocess_params}
_snake_case = {**self._forward_params, **forward_params}
    def lowerCAmelCase ( self , return_full_text=None , return_tensors=None , return_text=None , return_type=None , clean_up_tokenization_spaces=None , prefix=None , handle_long_generation=None , stop_sequence=None , **generate_kwargs , ) -> Tuple:
_snake_case = {}
if prefix is not None:
_snake_case = prefix
if prefix:
_snake_case = self.tokenizer(
lowerCAmelCase_ , padding=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_tensors=self.framework )
_snake_case = prefix_inputs['input_ids'].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
F'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected'''
' [None, \'hole\']' )
_snake_case = handle_long_generation
preprocess_params.update(lowerCAmelCase_ )
_snake_case = generate_kwargs
_snake_case = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_full_text`' )
if return_tensors is not None:
raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`' )
_snake_case = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_tensors`' )
_snake_case = ReturnType.TENSORS
if return_type is not None:
_snake_case = return_type
if clean_up_tokenization_spaces is not None:
_snake_case = clean_up_tokenization_spaces
if stop_sequence is not None:
_snake_case = self.tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
if len(lowerCAmelCase_ ) > 1:
warnings.warn(
'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
' the stop sequence will be used as the stop sequence string in the interim.' )
_snake_case = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
    def lowerCAmelCase ( self , *args , **kwargs ) -> List[str]:
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({'add_space_before_punct_symbol': True} )
        return super()._parse_and_tokenize(*args , **kwargs )
    def __call__( self , text_inputs , **kwargs ) -> List[Any]:
        return super().__call__(text_inputs , **kwargs )
    def lowerCAmelCase ( self , prompt_text , prefix="" , handle_long_generation=None , **generate_kwargs ) -> Any:
_snake_case = self.tokenizer(
prefix + prompt_text , padding=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_tensors=self.framework )
_snake_case = prompt_text
if handle_long_generation == "hole":
_snake_case = inputs['input_ids'].shape[-1]
if "max_new_tokens" in generate_kwargs:
_snake_case = generate_kwargs['max_new_tokens']
else:
_snake_case = generate_kwargs.get('max_length' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('We cannot infer how many new tokens are expected' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
_snake_case = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
'We cannot use `hole` to handle this generation the number of desired tokens exceeds the'
' models max length' )
_snake_case = inputs['input_ids'][:, -keep_length:]
if "attention_mask" in inputs:
_snake_case = inputs['attention_mask'][:, -keep_length:]
return inputs
    def lowerCAmelCase ( self , model_inputs , **generate_kwargs ) -> Optional[Any]:
_snake_case = model_inputs['input_ids']
_snake_case = model_inputs.get('attention_mask' , lowerCAmelCase_ )
# Allow empty prompts
if input_ids.shape[1] == 0:
_snake_case = None
_snake_case = None
_snake_case = 1
else:
_snake_case = input_ids.shape[0]
_snake_case = model_inputs.pop('prompt_text' )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
_snake_case = generate_kwargs.pop('prefix_length' , 0 )
if prefix_length > 0:
_snake_case = 'max_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].max_new_tokens is not None
)
if not has_max_new_tokens:
_snake_case = generate_kwargs.get('max_length' ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
_snake_case = 'min_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
_snake_case = self.model.generate(input_ids=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , **lowerCAmelCase_ )
_snake_case = generated_sequence.shape[0]
if self.framework == "pt":
_snake_case = generated_sequence.reshape(lowerCAmelCase_ , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
_snake_case = tf.reshape(lowerCAmelCase_ , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=ReturnType.FULL_TEXT , lowerCAmelCase_=True ) -> int:
_snake_case = model_outputs['generated_sequence'][0]
_snake_case = model_outputs['input_ids']
_snake_case = model_outputs['prompt_text']
_snake_case = generated_sequence.numpy().tolist()
_snake_case = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
_snake_case = {'generated_token_ids': sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
_snake_case = self.tokenizer.decode(
lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ , )
                # Remove the PADDING prompt from the sequence if an XLNet or Transfo-XL model is used
if input_ids is None:
_snake_case = 0
else:
_snake_case = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ , ) )
if return_type == ReturnType.FULL_TEXT:
_snake_case = prompt_text + text[prompt_length:]
else:
_snake_case = text[prompt_length:]
_snake_case = {'generated_text': all_text}
records.append(lowerCAmelCase_ )
return records
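
The `hole` branch of `preprocess` above trims the prompt from the left so that the requested number of new tokens still fits in the model's context window. A minimal standalone sketch of that truncation logic, using hypothetical names and plain tensors rather than the pipeline's internals:

import torch

def truncate_prompt_for_generation(input_ids, attention_mask, max_new_tokens, model_max_length):
    # Keep only the rightmost prompt tokens so that prompt + generation fits the context window.
    cur_len = input_ids.shape[-1]
    if cur_len + max_new_tokens > model_max_length:
        keep_length = model_max_length - max_new_tokens
        if keep_length <= 0:
            raise ValueError("The requested number of new tokens exceeds the model's maximum length")
        input_ids = input_ids[:, -keep_length:]
        if attention_mask is not None:
            attention_mask = attention_mask[:, -keep_length:]
    return input_ids, attention_mask

# Example: a 10-token prompt, 6 new tokens requested, 12-token window -> keep the last 6 prompt tokens.
ids = torch.arange(10).unsqueeze(0)
mask = torch.ones_like(ids)
new_ids, new_mask = truncate_prompt_for_generation(ids, mask, max_new_tokens=6, model_max_length=12)
assert new_ids.shape[-1] == 6
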
| 295
| 1
|
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class UpperCamelCase_ :
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=13 , lowerCAmelCase_=7 , lowerCAmelCase_=False , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=True , lowerCAmelCase_=33 , lowerCAmelCase_=32 , lowerCAmelCase_=5 , lowerCAmelCase_=4 , lowerCAmelCase_=37 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=512 , lowerCAmelCase_=16 , lowerCAmelCase_=2 , lowerCAmelCase_=0.02 , lowerCAmelCase_=3 , lowerCAmelCase_=4 , lowerCAmelCase_=None , ) -> Dict:
_snake_case = parent
_snake_case = batch_size
_snake_case = seq_length
_snake_case = is_training
_snake_case = use_input_mask
_snake_case = use_token_type_ids
_snake_case = use_labels
_snake_case = vocab_size
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = max_position_embeddings
_snake_case = type_vocab_size
_snake_case = type_sequence_label_size
_snake_case = initializer_range
_snake_case = num_labels
_snake_case = num_choices
_snake_case = scope
def lowerCAmelCase ( self ) -> Optional[Any]:
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_snake_case = None
if self.use_input_mask:
_snake_case = random_attention_mask([self.batch_size, self.seq_length] )
_snake_case = None
_snake_case = None
_snake_case = None
if self.use_labels:
_snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_snake_case = ids_tensor([self.batch_size] , self.num_choices )
_snake_case = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase ( self ) -> Optional[Any]:
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> str:
_snake_case = EsmModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_snake_case = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )
_snake_case = model(lowerCAmelCase_ )
_snake_case = model(lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Tuple:
_snake_case = EsmForMaskedLM(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_snake_case = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Dict:
_snake_case = self.num_labels
_snake_case = EsmForTokenClassification(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_snake_case = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase ( self ) -> Optional[int]:
_snake_case = self.prepare_config_and_inputs()
        _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case = config_and_inputs
_snake_case = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase_ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
lowerCAmelCase_ = False
lowerCAmelCase_ = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCAmelCase_ = ()
lowerCAmelCase_ = (
{
'''feature-extraction''': EsmModel,
'''fill-mask''': EsmForMaskedLM,
'''text-classification''': EsmForSequenceClassification,
'''token-classification''': EsmForTokenClassification,
'''zero-shot''': EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase_ = True
def lowerCAmelCase ( self ) -> str:
_snake_case = EsmModelTester(self )
_snake_case = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=37 )
def lowerCAmelCase ( self ) -> str:
self.config_tester.run_common_tests()
def lowerCAmelCase ( self ) -> Dict:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> Any:
_snake_case = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_snake_case = type
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> Dict:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> Optional[Any]:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase_ )
@slow
def lowerCAmelCase ( self ) -> Tuple:
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case = EsmModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> str:
_snake_case = self.model_tester.prepare_config_and_inputs()[0]
_snake_case = EsmEmbeddings(config=lowerCAmelCase_ )
_snake_case = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
_snake_case = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
_snake_case = create_position_ids_from_input_ids(lowerCAmelCase_ , model.padding_idx )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(lowerCAmelCase_ , lowerCAmelCase_ ) ) )
def lowerCAmelCase ( self ) -> Dict:
_snake_case = self.model_tester.prepare_config_and_inputs()[0]
_snake_case = EsmEmbeddings(config=lowerCAmelCase_ )
_snake_case = torch.empty(2 , 4 , 30 )
_snake_case = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
_snake_case = torch.as_tensor([expected_single_positions, expected_single_positions] )
_snake_case = embeddings.create_position_ids_from_inputs_embeds(lowerCAmelCase_ )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(lowerCAmelCase_ , lowerCAmelCase_ ) ) )
@unittest.skip('Esm does not support embedding resizing' )
def lowerCAmelCase ( self ) -> Optional[int]:
pass
@unittest.skip('Esm does not support embedding resizing' )
def lowerCAmelCase ( self ) -> List[Any]:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowerCAmelCase ( self ) -> List[str]:
pass
@require_torch
class UpperCamelCase_ ( _lowerCamelCase ):
@slow
def lowerCAmelCase ( self ) -> List[Any]:
with torch.no_grad():
_snake_case = EsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
model.eval()
_snake_case = torch.tensor([[0, 1, 2, 3, 4, 5]] )
_snake_case = model(lowerCAmelCase_ )[0]
_snake_case = 33
_snake_case = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape , lowerCAmelCase_ )
_snake_case = torch.tensor(
[[[8.92_15, -10.58_98, -6.46_71], [-6.39_67, -13.91_14, -1.12_12], [-7.78_12, -13.95_16, -3.74_06]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCAmelCase_ , atol=1E-4 ) )
@slow
def lowerCAmelCase ( self ) -> Optional[Any]:
with torch.no_grad():
_snake_case = EsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
model.eval()
_snake_case = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
_snake_case = model(lowerCAmelCase_ )[0]
# compare the actual values for a slice.
_snake_case = torch.tensor(
[[[0.14_44, 0.54_13, 0.32_48], [0.30_34, 0.00_53, 0.31_08], [0.32_28, -0.24_99, 0.34_15]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCAmelCase_ , atol=1E-4 ) )
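
The two position-id tests above pin down the convention ESM inherits from RoBERTa: padding positions keep `padding_idx`, and real tokens are numbered from `padding_idx + 1` onward. A minimal sketch of a function satisfying those assertions (illustrative, not the library's actual implementation):

import torch

def create_position_ids(input_ids, padding_idx):
    # Non-padding tokens get positions padding_idx + 1, padding_idx + 2, ...;
    # padding tokens keep padding_idx itself.
    mask = input_ids.ne(padding_idx).int()
    incremental_indices = torch.cumsum(mask, dim=1) * mask
    return incremental_indices.long() + padding_idx

input_ids = torch.as_tensor([[12, 31, 13, 1]])  # 1 plays the role of the padding index, as in the test
print(create_position_ids(input_ids, padding_idx=1))  # tensor([[2, 3, 4, 1]])
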
| 295
|
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
UpperCAmelCase_ = """\
@misc{wu2016googles,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
UpperCAmelCase_ = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
UpperCAmelCase_ = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
'google_bleu': google_bleu score
Examples:
Example 1:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase_ ( datasets.Metric ):
def lowerCAmelCase ( self ) -> MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
} ) , )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 1 , lowerCAmelCase_ = 4 , ) -> Dict[str, float]:
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=lowerCAmelCase_ , hypotheses=lowerCAmelCase_ , min_len=lowerCAmelCase_ , max_len=lowerCAmelCase_ )
}
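
Per the description embedded above, sentence-level GLEU records all 1- to 4-gram sub-sequences of hypothesis and reference and returns the minimum of n-gram recall and precision. A hand-rolled sketch of that computation for a single sentence pair (illustrative only; the metric itself delegates to nltk's `gleu_score.corpus_gleu`):

from collections import Counter

def ngrams(tokens, min_len=1, max_len=4):
    counts = Counter()
    for n in range(min_len, max_len + 1):
        for i in range(len(tokens) - n + 1):
            counts[tuple(tokens[i : i + n])] += 1
    return counts

def sentence_gleu(reference, hypothesis, min_len=1, max_len=4):
    ref_counts = ngrams(reference, min_len, max_len)
    hyp_counts = ngrams(hypothesis, min_len, max_len)
    matches = sum((ref_counts & hyp_counts).values())  # clipped n-gram matches
    recall = matches / max(sum(ref_counts.values()), 1)
    precision = matches / max(sum(hyp_counts.values()), 1)
    return min(recall, precision)

print(sentence_gleu("the cat sat".split(), "the cat sat".split()))  # 1.0 for an exact match
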
| 295
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase_ = {
"""configuration_clip""": [
"""CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""CLIPConfig""",
"""CLIPOnnxConfig""",
"""CLIPTextConfig""",
"""CLIPVisionConfig""",
],
"""processing_clip""": ["""CLIPProcessor"""],
"""tokenization_clip""": ["""CLIPTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ["""CLIPTokenizerFast"""]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ["""CLIPFeatureExtractor"""]
UpperCAmelCase_ = ["""CLIPImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"""CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CLIPModel""",
"""CLIPPreTrainedModel""",
"""CLIPTextModel""",
"""CLIPTextModelWithProjection""",
"""CLIPVisionModel""",
"""CLIPVisionModelWithProjection""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"""TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCLIPModel""",
"""TFCLIPPreTrainedModel""",
"""TFCLIPTextModel""",
"""TFCLIPVisionModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"""FlaxCLIPModel""",
"""FlaxCLIPPreTrainedModel""",
"""FlaxCLIPTextModel""",
"""FlaxCLIPTextPreTrainedModel""",
"""FlaxCLIPVisionModel""",
"""FlaxCLIPVisionPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
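
The `_LazyModule` wiring above defers the heavy framework imports until an attribute is first accessed, while the `TYPE_CHECKING` branch keeps static analyzers happy. A minimal sketch of the same idea with PEP 562 module-level `__getattr__` (a simplification with hypothetical module names, not the actual `_LazyModule` code):

# hypothetical my_package/__init__.py
import importlib

_import_structure = {"submodule_a": ["HeavyClass"], "submodule_b": ["OtherClass"]}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):
    # Triggered only on first access to an unknown attribute; the submodule is imported lazily.
    if name in _attr_to_module:
        module = importlib.import_module(f".{_attr_to_module[name]}", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
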
| 295
|
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
# See all BART models at https://huggingface.co/models?filter=bart
UpperCAmelCase_ = {
"""vocab_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""",
},
"""merges_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""",
},
}
UpperCAmelCase_ = {
"""facebook/bart-base""": 1024,
"""facebook/bart-large""": 1024,
"""facebook/bart-large-mnli""": 1024,
"""facebook/bart-large-cnn""": 1024,
"""facebook/bart-large-xsum""": 1024,
"""yjernite/bart_eli5""": 1024,
}
@lru_cache()
def lowerCamelCase__ ( ) -> Tuple:
'''simple docstring'''
_snake_case = (
list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
)
_snake_case = bs[:]
_snake_case = 0
for b in range(2**8 ):
if b not in bs:
bs.append(UpperCamelCase__ )
cs.append(2**8 + n )
n += 1
_snake_case = [chr(UpperCamelCase__ ) for n in cs]
return dict(zip(UpperCamelCase__ , UpperCamelCase__ ) )
def lowerCamelCase__ ( UpperCamelCase__ : Tuple ) -> int:
'''simple docstring'''
_snake_case = set()
_snake_case = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_snake_case = char
return pairs
class UpperCamelCase_ ( _lowerCamelCase ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ['''input_ids''', '''attention_mask''']
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="replace" , lowerCAmelCase_="<s>" , lowerCAmelCase_="</s>" , lowerCAmelCase_="</s>" , lowerCAmelCase_="<s>" , lowerCAmelCase_="<unk>" , lowerCAmelCase_="<pad>" , lowerCAmelCase_="<mask>" , lowerCAmelCase_=False , **lowerCAmelCase_ , ) -> Tuple:
_snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else bos_token
_snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else eos_token
_snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else sep_token
_snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else cls_token
_snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else unk_token
_snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
_snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else mask_token
super().__init__(
errors=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ , **lowerCAmelCase_ , )
with open(lowerCAmelCase_ , encoding='utf-8' ) as vocab_handle:
_snake_case = json.load(lowerCAmelCase_ )
_snake_case = {v: k for k, v in self.encoder.items()}
_snake_case = errors # how to handle errors in decoding
_snake_case = bytes_to_unicode()
_snake_case = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCAmelCase_ , encoding='utf-8' ) as merges_handle:
_snake_case = merges_handle.read().split('\n' )[1:-1]
_snake_case = [tuple(merge.split() ) for merge in bpe_merges]
_snake_case = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
_snake_case = {}
_snake_case = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
_snake_case = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
def lowerCAmelCase ( self ) -> Any:
return len(self.encoder )
def lowerCAmelCase ( self ) -> Dict:
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Dict:
if token in self.cache:
return self.cache[token]
_snake_case = tuple(lowerCAmelCase_ )
_snake_case = get_pairs(lowerCAmelCase_ )
if not pairs:
return token
while True:
_snake_case = min(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : self.bpe_ranks.get(lowerCAmelCase_ , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
_snake_case , _snake_case = bigram
_snake_case = []
_snake_case = 0
while i < len(lowerCAmelCase_ ):
try:
_snake_case = word.index(lowerCAmelCase_ , lowerCAmelCase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_snake_case = j
if word[i] == first and i < len(lowerCAmelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_snake_case = tuple(lowerCAmelCase_ )
_snake_case = new_word
if len(lowerCAmelCase_ ) == 1:
break
else:
_snake_case = get_pairs(lowerCAmelCase_ )
_snake_case = ' '.join(lowerCAmelCase_ )
_snake_case = word
return word
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> List[Any]:
_snake_case = []
for token in re.findall(self.pat , lowerCAmelCase_ ):
_snake_case = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCAmelCase_ ).split(' ' ) )
return bpe_tokens
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> str:
return self.encoder.get(lowerCAmelCase_ , self.encoder.get(self.unk_token ) )
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Optional[int]:
return self.decoder.get(lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Union[str, Any]:
_snake_case = ''.join(lowerCAmelCase_ )
_snake_case = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
return text
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> Tuple[str]:
if not os.path.isdir(lowerCAmelCase_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
_snake_case = os.path.join(
lowerCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
_snake_case = os.path.join(
lowerCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(lowerCAmelCase_ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase_ , ensure_ascii=lowerCAmelCase_ ) + '\n' )
_snake_case = 0
with open(lowerCAmelCase_ , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCAmelCase_ : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
' Please check that the tokenizer is not corrupted!' )
_snake_case = token_index
writer.write(' '.join(lowerCAmelCase_ ) + '\n' )
index += 1
return vocab_file, merge_file
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_snake_case = [self.cls_token_id]
_snake_case = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase_ , token_ids_a=lowerCAmelCase_ , already_has_special_tokens=lowerCAmelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase_ )) + [1]
return [1] + ([0] * len(lowerCAmelCase_ )) + [1, 1] + ([0] * len(lowerCAmelCase_ )) + [1]
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> List[int]:
_snake_case = [self.sep_token_id]
_snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=False , **lowerCAmelCase_ ) -> str:
_snake_case = kwargs.pop('add_prefix_space' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCAmelCase_ ) > 0 and not text[0].isspace()):
_snake_case = ' ' + text
return (text, kwargs)
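
The `bpe` method above repeatedly merges the adjacent symbol pair with the lowest merge rank until no known merge remains. A toy sketch of that loop with a hand-written two-entry merge table (assumed data, for illustration only):

def get_pairs(word):
    return {(a, b) for a, b in zip(word, word[1:])}

def bpe(token, bpe_ranks):
    word = tuple(token)
    while len(word) > 1:
        pairs = get_pairs(word)
        # Pick the adjacent pair with the lowest merge rank, if any is known.
        bigram = min(pairs, key=lambda pair: bpe_ranks.get(pair, float("inf")))
        if bigram not in bpe_ranks:
            break
        first, second = bigram
        merged, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and word[i] == first and word[i + 1] == second:
                merged.append(first + second)
                i += 2
            else:
                merged.append(word[i])
                i += 1
        word = tuple(merged)
    return " ".join(word)

ranks = {("l", "o"): 0, ("lo", "w"): 1}  # toy merge table standing in for merges.txt
print(bpe("lower", ranks))  # 'low e r'
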
| 295
| 1
|
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
UpperCAmelCase_ = """\
@misc{wu2016googles,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
UpperCAmelCase_ = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
UpperCAmelCase_ = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
'google_bleu': google_bleu score
Examples:
Example 1:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase_ ( datasets.Metric ):
def lowerCAmelCase ( self ) -> MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
} ) , )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 1 , lowerCAmelCase_ = 4 , ) -> Dict[str, float]:
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=lowerCAmelCase_ , hypotheses=lowerCAmelCase_ , min_len=lowerCAmelCase_ , max_len=lowerCAmelCase_ )
}
| 295
|
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
UpperCAmelCase_ = logging.get_logger(__name__)
logging.set_verbosity_info()
def lowerCamelCase__ ( UpperCamelCase__ : str , UpperCamelCase__ : str ) -> Union[str, Any]:
'''simple docstring'''
if "xprophetnet" in prophetnet_checkpoint_path:
_snake_case = XLMProphetNetForConditionalGenerationOld.from_pretrained(UpperCamelCase__ )
_snake_case , _snake_case = XLMProphetNetForConditionalGeneration.from_pretrained(
UpperCamelCase__ , output_loading_info=UpperCamelCase__ )
else:
_snake_case = ProphetNetForConditionalGenerationOld.from_pretrained(UpperCamelCase__ )
_snake_case , _snake_case = ProphetNetForConditionalGeneration.from_pretrained(
UpperCamelCase__ , output_loading_info=UpperCamelCase__ )
_snake_case = ['key_proj', 'value_proj', 'query_proj']
_snake_case = {
'self_attn': 'ngram_self_attn',
'cross_attn': 'encoder_attn',
'cross_attn_layer_norm': 'encoder_attn_layer_norm',
'feed_forward_layer_norm': 'final_layer_norm',
'feed_forward': '',
'intermediate': 'fc1',
'output': 'fc2',
'key_proj': 'k_proj',
'query_proj': 'q_proj',
'value_proj': 'v_proj',
'word_embeddings': 'embed_tokens',
'embeddings_layer_norm': 'emb_layer_norm',
'relative_pos_embeddings': 'relative_linear',
'ngram_embeddings': 'ngram_input_embed',
'position_embeddings': 'embed_positions',
}
for key in loading_info["missing_keys"]:
_snake_case = key.split('.' )
if attributes[0] == "lm_head":
_snake_case = prophet
_snake_case = prophet_old
else:
_snake_case = prophet.prophetnet
_snake_case = prophet_old.model
_snake_case = False
for attribute in attributes:
if attribute in mapping:
_snake_case = mapping[attribute]
if not hasattr(UpperCamelCase__ , UpperCamelCase__ ) and len(UpperCamelCase__ ) > 0:
_snake_case = attribute
elif hasattr(UpperCamelCase__ , UpperCamelCase__ ):
_snake_case = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
_snake_case = old_model.weight
logger.info(F'''{attribute} is initialized.''' )
_snake_case = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
_snake_case = old_model.bias
logger.info(F'''{attribute} is initialized''' )
_snake_case = True
break
elif attribute in special_keys and hasattr(UpperCamelCase__ , 'in_proj_weight' ):
_snake_case = old_model.in_proj_weight.shape[0] // 3
_snake_case = getattr(UpperCamelCase__ , UpperCamelCase__ )
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
_snake_case = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
_snake_case = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
_snake_case = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
_snake_case = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
_snake_case = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
_snake_case = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
_snake_case = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
_snake_case = nn.Parameter(old_model.embed_positions.weight[:512, :] )
_snake_case = True
break
if attribute.isdigit():
_snake_case = model[int(UpperCamelCase__ )]
_snake_case = old_model[int(UpperCamelCase__ )]
else:
_snake_case = getattr(UpperCamelCase__ , UpperCamelCase__ )
if old_attribute == "":
_snake_case = old_model
else:
if not hasattr(UpperCamelCase__ , UpperCamelCase__ ):
raise ValueError(F'''{old_model} does not have {old_attribute}''' )
_snake_case = getattr(UpperCamelCase__ , UpperCamelCase__ )
if not is_key_init:
raise ValueError(F'''{key} was not correctly initialized!''' )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
prophet.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
UpperCAmelCase_ = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
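
The `special_keys` branch above slices a fused `in_proj_weight` of shape `(3 * embed_dim, embed_dim)` into separate query/key/value projections, in that row order. A minimal sketch of the split on dummy tensors (hypothetical sizes, detached from any real checkpoint):

import torch
from torch import nn

embed_dim = 4
in_proj_weight = torch.randn(3 * embed_dim, embed_dim)  # fused [q; k; v] rows
in_proj_bias = torch.randn(3 * embed_dim)

q_proj = nn.Linear(embed_dim, embed_dim)
k_proj = nn.Linear(embed_dim, embed_dim)
v_proj = nn.Linear(embed_dim, embed_dim)

q_proj.weight = nn.Parameter(in_proj_weight[:embed_dim, :])
q_proj.bias = nn.Parameter(in_proj_bias[:embed_dim])
k_proj.weight = nn.Parameter(in_proj_weight[embed_dim : 2 * embed_dim, :])
k_proj.bias = nn.Parameter(in_proj_bias[embed_dim : 2 * embed_dim])
v_proj.weight = nn.Parameter(in_proj_weight[2 * embed_dim :, :])
v_proj.bias = nn.Parameter(in_proj_bias[2 * embed_dim :])

assert q_proj.weight.shape == (embed_dim, embed_dim)
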
| 295
| 1
|
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class UpperCamelCase_ ( _lowerCamelCase , unittest.TestCase ):
lowerCAmelCase_ = PriorTransformer
lowerCAmelCase_ = '''hidden_states'''
@property
def lowerCAmelCase ( self ) -> List[Any]:
_snake_case = 4
_snake_case = 8
_snake_case = 7
_snake_case = floats_tensor((batch_size, embedding_dim) ).to(lowerCAmelCase_ )
_snake_case = floats_tensor((batch_size, embedding_dim) ).to(lowerCAmelCase_ )
_snake_case = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(lowerCAmelCase_ )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def lowerCAmelCase ( self , lowerCAmelCase_=0 ) -> Union[str, Any]:
torch.manual_seed(lowerCAmelCase_ )
_snake_case = 4
_snake_case = 8
_snake_case = 7
_snake_case = torch.randn((batch_size, embedding_dim) ).to(lowerCAmelCase_ )
_snake_case = torch.randn((batch_size, embedding_dim) ).to(lowerCAmelCase_ )
_snake_case = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(lowerCAmelCase_ )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
@property
def lowerCAmelCase ( self ) -> Optional[Any]:
return (4, 8)
@property
def lowerCAmelCase ( self ) -> Optional[Any]:
return (4, 8)
def lowerCAmelCase ( self ) -> str:
_snake_case = {
'num_attention_heads': 2,
'attention_head_dim': 4,
'num_layers': 2,
'embedding_dim': 8,
'num_embeddings': 7,
'additional_embeddings': 4,
}
_snake_case = self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase ( self ) -> Union[str, Any]:
_snake_case , _snake_case = PriorTransformer.from_pretrained(
'hf-internal-testing/prior-dummy' , output_loading_info=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
model.to(lowerCAmelCase_ )
_snake_case = model(**self.dummy_input )[0]
assert hidden_states is not None, "Make sure output is not None"
def lowerCAmelCase ( self ) -> str:
_snake_case , _snake_case = self.prepare_init_args_and_inputs_for_common()
_snake_case = self.model_class(**lowerCAmelCase_ )
_snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case = [*signature.parameters.keys()]
_snake_case = ['hidden_states', 'timestep']
self.assertListEqual(arg_names[:2] , lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> Dict:
_snake_case = PriorTransformer.from_pretrained('hf-internal-testing/prior-dummy' )
_snake_case = model.to(lowerCAmelCase_ )
if hasattr(lowerCAmelCase_ , 'set_default_attn_processor' ):
model.set_default_attn_processor()
_snake_case = self.get_dummy_seed_input()
with torch.no_grad():
_snake_case = model(**lowerCAmelCase_ )[0]
_snake_case = output[0, :5].flatten().cpu()
print(lowerCAmelCase_ )
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
_snake_case = torch.tensor([-1.34_36, -0.28_70, 0.75_38, 0.43_68, -0.02_39] )
self.assertTrue(torch_all_close(lowerCAmelCase_ , lowerCAmelCase_ , rtol=1E-2 ) )
@slow
class UpperCamelCase_ ( unittest.TestCase ):
def lowerCAmelCase ( self , lowerCAmelCase_=1 , lowerCAmelCase_=768 , lowerCAmelCase_=77 , lowerCAmelCase_=0 ) -> str:
torch.manual_seed(lowerCAmelCase_ )
_snake_case = batch_size
_snake_case = embedding_dim
_snake_case = num_embeddings
_snake_case = torch.randn((batch_size, embedding_dim) ).to(lowerCAmelCase_ )
_snake_case = torch.randn((batch_size, embedding_dim) ).to(lowerCAmelCase_ )
_snake_case = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(lowerCAmelCase_ )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def lowerCAmelCase ( self ) -> Dict:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[13, [-0.58_61, 0.12_83, -0.09_31, 0.08_82, 0.44_76, 0.13_29, -0.04_98, 0.06_40]],
[37, [-0.49_13, 0.01_10, -0.04_83, 0.05_41, 0.49_54, -0.01_70, 0.03_54, 0.16_51]],
# fmt: on
] )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Any:
_snake_case = PriorTransformer.from_pretrained('kandinsky-community/kandinsky-2-1-prior' , subfolder='prior' )
model.to(lowerCAmelCase_ )
_snake_case = self.get_dummy_seed_input(seed=lowerCAmelCase_ )
with torch.no_grad():
_snake_case = model(**lowerCAmelCase_ )[0]
assert list(sample.shape ) == [1, 768]
_snake_case = sample[0, :8].flatten().cpu()
print(lowerCAmelCase_ )
_snake_case = torch.tensor(lowerCAmelCase_ )
assert torch_all_close(lowerCAmelCase_ , lowerCAmelCase_ , atol=1E-3 )
| 295
|
import random
def lowerCamelCase__ ( UpperCamelCase__ : int , UpperCamelCase__ : float , UpperCamelCase__ : bool = False ) -> dict:
'''simple docstring'''
_snake_case = {i: [] for i in range(UpperCamelCase__ )}
    # if probability is greater than or equal to 1, then generate a complete graph
if probability >= 1:
return complete_graph(UpperCamelCase__ )
    # if probability is less than or equal to 0, then return a graph without edges
if probability <= 0:
return graph
    # for each pair of nodes (i, j), add an edge from i to j
    # if the randomly generated number is less than the given probability
for i in range(UpperCamelCase__ ):
for j in range(i + 1 , UpperCamelCase__ ):
if random.random() < probability:
graph[i].append(UpperCamelCase__ )
if not directed:
                    # if the graph is undirected, also add the reverse edge from j to i
graph[j].append(UpperCamelCase__ )
return graph
def lowerCamelCase__ ( UpperCamelCase__ : int ) -> dict:
'''simple docstring'''
return {
i: [j for j in range(UpperCamelCase__ ) if i != j] for i in range(UpperCamelCase__ )
}
if __name__ == "__main__":
import doctest
doctest.testmod()
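
Because each of the n(n-1)/2 node pairs receives an edge independently with the given probability, the expected number of undirected edges is p * n * (n - 1) / 2. A quick sanity-check sketch against a cleanly named re-implementation of the generator above (the function name is illustrative):

import random

def random_graph(num_nodes, probability, directed=False):
    graph = {i: [] for i in range(num_nodes)}
    for i in range(num_nodes):
        for j in range(i + 1, num_nodes):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    graph[j].append(i)
    return graph

random.seed(0)
n, p = 100, 0.3
g = random_graph(n, p)
edge_count = sum(len(neighbors) for neighbors in g.values()) // 2  # each undirected edge is stored twice
print(edge_count, "expected about", p * n * (n - 1) / 2)  # roughly 1485
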
| 295
| 1
|
def lowerCamelCase__ ( UpperCamelCase__ : int ) -> int:
'''simple docstring'''
_snake_case = 0
while num > 0:
digit_sum += num % 10
num //= 10
return digit_sum
def lowerCamelCase__ ( UpperCamelCase__ : int = 100 ) -> int:
'''simple docstring'''
_snake_case = 1
_snake_case = 2
for i in range(2 , max_n + 1 ):
_snake_case = pre_numerator
_snake_case = 2 * i // 3 if i % 3 == 0 else 1
_snake_case = cur_numerator
_snake_case = e_cont * pre_numerator + temp
return sum_digits(UpperCamelCase__ )
if __name__ == "__main__":
print(F"{solution() = }")
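
The loop above computes the numerator of the 100th convergent of the continued fraction of e, whose terms after the leading 2 run 1, 2, 1, 1, 4, 1, 1, 6, 1, ... A sketch that cross-checks the recurrence by folding the same terms with exact `Fraction` arithmetic:

from fractions import Fraction

def e_convergent_numerator(max_n=100):
    # Continued fraction terms of e: [2; 1, 2, 1, 1, 4, 1, 1, 6, 1, ...]
    terms = [2] + [2 * (i // 3 + 1) if i % 3 == 1 else 1 for i in range(max_n - 1)]
    value = Fraction(terms[-1])
    for term in reversed(terms[:-1]):
        value = term + 1 / value
    return value.numerator

print(sum(int(d) for d in str(e_convergent_numerator(100))))  # 272
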
| 295
|
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class UpperCamelCase_ :
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ = 13 , lowerCAmelCase_ = 64 , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 3 , lowerCAmelCase_ = 3 , lowerCAmelCase_ = True , lowerCAmelCase_ = True , lowerCAmelCase_ = 128 , lowerCAmelCase_=[16, 32, 64, 128] , lowerCAmelCase_ = 7 , lowerCAmelCase_ = 4 , lowerCAmelCase_ = 37 , lowerCAmelCase_ = "gelu" , lowerCAmelCase_ = 0.1 , lowerCAmelCase_ = 0.1 , lowerCAmelCase_ = 10 , lowerCAmelCase_ = 0.02 , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 1 , lowerCAmelCase_ = 128 , lowerCAmelCase_ = [2, 2, 2, 2] , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 2 , ) -> Dict:
_snake_case = parent
_snake_case = batch_size
_snake_case = image_size
_snake_case = patch_size
_snake_case = num_channels
_snake_case = is_training
_snake_case = use_labels
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = type_sequence_label_size
_snake_case = initializer_range
_snake_case = encoder_stride
_snake_case = num_attention_outputs
_snake_case = embed_dim
_snake_case = embed_dim + 1
_snake_case = resolution
_snake_case = depths
_snake_case = hidden_sizes
_snake_case = dim
_snake_case = mlp_expansion_ratio
def lowerCAmelCase ( self ) -> Optional[Any]:
_snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case = None
if self.use_labels:
_snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_snake_case = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase ( self ) -> Tuple:
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]:
_snake_case = TFEfficientFormerModel(config=lowerCAmelCase_ )
_snake_case = model(lowerCAmelCase_ , training=lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Any:
_snake_case = self.type_sequence_label_size
_snake_case = TFEfficientFormerForImageClassification(lowerCAmelCase_ )
_snake_case = model(lowerCAmelCase_ , labels=lowerCAmelCase_ , training=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_snake_case = 1
_snake_case = TFEfficientFormerForImageClassification(lowerCAmelCase_ )
_snake_case = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_snake_case = model(lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCAmelCase ( self ) -> List[str]:
_snake_case = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case = config_and_inputs
_snake_case = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class UpperCamelCase_ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
lowerCAmelCase_ = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
lowerCAmelCase_ = (
{
'''feature-extraction''': TFEfficientFormerModel,
'''image-classification''': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def lowerCAmelCase ( self ) -> str:
_snake_case = TFEfficientFormerModelTester(self )
        _snake_case = ConfigTester(
            self , config_class=EfficientFormerConfig , has_text_modality=False , hidden_size=37 )
def lowerCAmelCase ( self ) -> str:
self.config_tester.run_common_tests()
@unittest.skip(reason='EfficientFormer does not use inputs_embeds' )
def lowerCAmelCase ( self ) -> int:
pass
@unittest.skip(reason='EfficientFormer does not support input and output embeddings' )
def lowerCAmelCase ( self ) -> Optional[Any]:
pass
def lowerCAmelCase ( self ) -> str:
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(lowerCAmelCase_ )
_snake_case = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case = [*signature.parameters.keys()]
_snake_case = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> Optional[Any]:
def check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case = model_class(lowerCAmelCase_ )
_snake_case = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) , training=lowerCAmelCase_ )
_snake_case = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_snake_case = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ )
if hasattr(self.model_tester , 'encoder_seq_length' ):
_snake_case = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , 'chunk_length' ) and self.model_tester.chunk_length > 1:
_snake_case = seq_length * self.model_tester.chunk_length
else:
_snake_case = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
_snake_case = outputs.decoder_hidden_states
                self.assertIsInstance(lowerCAmelCase_ , (list, tuple) )
self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ )
_snake_case = getattr(self.model_tester , 'seq_length' , lowerCAmelCase_ )
_snake_case = getattr(self.model_tester , 'decoder_seq_length' , lowerCAmelCase_ )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_snake_case = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False ) -> List[Any]:
_snake_case = super()._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def lowerCAmelCase ( self ) -> Dict:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
@unittest.skip(reason='EfficientFormer does not implement masked image modeling yet' )
def lowerCAmelCase ( self ) -> Dict:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> List[Any]:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase_ )
@slow
def lowerCAmelCase ( self ) -> str:
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case = TFEfficientFormerModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> List[str]:
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case = True
_snake_case = getattr(self.model_tester , 'seq_length' , lowerCAmelCase_ )
_snake_case = getattr(self.model_tester , 'encoder_seq_length' , lowerCAmelCase_ )
_snake_case = getattr(self.model_tester , 'key_length' , lowerCAmelCase_ )
_snake_case = getattr(self.model_tester , 'chunk_length' , lowerCAmelCase_ )
if chunk_length is not None and hasattr(self.model_tester , 'num_hashes' ):
_snake_case = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
_snake_case = True
_snake_case = False
_snake_case = True
_snake_case = model_class(lowerCAmelCase_ )
_snake_case = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) , training=lowerCAmelCase_ )
_snake_case = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowerCAmelCase_ ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_snake_case = True
_snake_case = model_class(lowerCAmelCase_ )
_snake_case = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) , training=lowerCAmelCase_ )
_snake_case = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowerCAmelCase_ ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def lowerCAmelCase ( self ) -> Dict:
# We use a simplified version of this test for EfficientFormer because it requires training=False
# and Keras refuses to let us force that during functional construction
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
_snake_case = model_class(lowerCAmelCase_ )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
_snake_case = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=lowerCAmelCase_ )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
_snake_case = model(lowerCAmelCase_ )
self.assertTrue(outputs_dict is not None )
def prepare_img( ) -> List[str]:
'''simple docstring'''
_snake_case = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class UpperCamelCase_ ( unittest.TestCase ):
@cached_property
def lowerCAmelCase ( self ) -> Dict:
return (
EfficientFormerImageProcessor.from_pretrained('snap-research/efficientformer-l1-300' )
if is_vision_available()
else None
)
@slow
def lowerCAmelCase ( self ) -> Union[str, Any]:
_snake_case = TFEfficientFormerForImageClassification.from_pretrained('snap-research/efficientformer-l1-300' )
_snake_case = self.default_image_processor
_snake_case = prepare_img()
_snake_case = image_processor(images=lowerCAmelCase_ , return_tensors='tf' )
# forward pass
_snake_case = model(**lowerCAmelCase_ , training=lowerCAmelCase_ )
# verify the logits
_snake_case = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase_ )
        _snake_case = tf.constant([-0.0555, 0.4825, -0.0852] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1E-4 ) )
@slow
def lowerCAmelCase ( self ) -> str:
_snake_case = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
'snap-research/efficientformer-l1-300' )
_snake_case = self.default_image_processor
_snake_case = prepare_img()
_snake_case = image_processor(images=lowerCAmelCase_ , return_tensors='tf' )
# forward pass
_snake_case = model(**lowerCAmelCase_ , training=lowerCAmelCase_ )
# verify the logits
_snake_case = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase_ )
        _snake_case = tf.constant([-0.1312, 0.4353, -1.0499] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1E-4 ) )
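    # Illustrative sketch (not part of the test suite): the same checkpoint used
    # end-to-end, mapping logits to a human-readable label. This helper is
    # hypothetical; it assumes `config.id2label` is populated, as it is for
    # standard image-classification checkpoints.
    def _demo_top1_label(self) -> str:
        model = TFEfficientFormerForImageClassification.from_pretrained(
            'snap-research/efficientformer-l1-300' )
        inputs = self.default_image_processor(images=prepare_img() , return_tensors='tf' )
        logits = model(**inputs , training=False ).logits
        predicted_id = int(tf.math.argmax(logits , axis=-1 )[0] )
        return model.config.id2label[predicted_id]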
| 295
| 1
|
from __future__ import annotations
def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    '''simple docstring'''
    if partitions <= 0:
        raise ValueError('partitions must be a positive number!' )
    if partitions > number_of_bytes:
        raise ValueError('partitions can not > number_of_bytes!' )
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions ):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
allocation_list.append(F'''{start_bytes}-{end_bytes}''' )
return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
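# Illustrative usage (not in the original script): 100 bytes split across 4
# partitions gives three equal 25-byte ranges, with the final range absorbing
# the remainder.
# >>> allocation_num(100, 4)
# ['1-25', '26-50', '51-75', '76-100']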
| 295
|
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCamelCase_ ( _lowerCamelCase , unittest.TestCase ):
lowerCAmelCase_ = LEDTokenizer
lowerCAmelCase_ = LEDTokenizerFast
lowerCAmelCase_ = True
def lowerCAmelCase ( self ) -> List[str]:
super().setUp()
_snake_case = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
_snake_case = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
_snake_case = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
_snake_case = {'unk_token': '<unk>'}
_snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
_snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(lowerCAmelCase_ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(lowerCAmelCase_ ) )
def lowerCAmelCase ( self , **lowerCAmelCase_ ) -> List[str]:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def lowerCAmelCase ( self , **lowerCAmelCase_ ) -> str:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Union[str, Any]:
return "lower newer", "lower newer"
@cached_property
def lowerCAmelCase ( self ) -> Optional[Any]:
return LEDTokenizer.from_pretrained('allenai/led-base-16384' )
@cached_property
def lowerCAmelCase ( self ) -> Union[str, Any]:
return LEDTokenizerFast.from_pretrained('allenai/led-base-16384' )
@require_torch
def lowerCAmelCase ( self ) -> Union[str, Any]:
_snake_case = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        _snake_case = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_snake_case = tokenizer(lowerCAmelCase_ , max_length=len(lowerCAmelCase_ ) , padding=lowerCAmelCase_ , return_tensors='pt' )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
_snake_case = batch.input_ids.tolist()[0]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
@require_torch
def lowerCAmelCase ( self ) -> Optional[Any]:
_snake_case = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_snake_case = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors='pt' )
self.assertIn('input_ids' , lowerCAmelCase_ )
self.assertIn('attention_mask' , lowerCAmelCase_ )
self.assertNotIn('labels' , lowerCAmelCase_ )
self.assertNotIn('decoder_attention_mask' , lowerCAmelCase_ )
@require_torch
def lowerCAmelCase ( self ) -> Optional[int]:
_snake_case = [
'Summary of the text.',
'Another summary.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_snake_case = tokenizer(text_target=lowerCAmelCase_ , max_length=32 , padding='max_length' , return_tensors='pt' )
self.assertEqual(32 , targets['input_ids'].shape[1] )
@require_torch
def lowerCAmelCase ( self ) -> List[str]:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_snake_case = tokenizer(
['I am a small frog' * 1024, 'I am a small frog'] , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , return_tensors='pt' )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual(batch.input_ids.shape , (2, 5122) )
@require_torch
def lowerCAmelCase ( self ) -> Union[str, Any]:
_snake_case = ['A long paragraph for summarization.']
_snake_case = [
'Summary of the text.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_snake_case = tokenizer(lowerCAmelCase_ , return_tensors='pt' )
_snake_case = tokenizer(text_target=lowerCAmelCase_ , return_tensors='pt' )
_snake_case = inputs['input_ids']
_snake_case = targets['input_ids']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def lowerCAmelCase ( self ) -> List[str]:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_snake_case = ['Summary of the text.', 'Another summary.']
_snake_case = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
_snake_case = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ )
_snake_case = [[0] * len(lowerCAmelCase_ ) for x in encoded_output['input_ids']]
_snake_case = tokenizer.pad(lowerCAmelCase_ )
self.assertSequenceEqual(outputs['global_attention_mask'] , lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> Tuple:
pass
def lowerCAmelCase ( self ) -> str:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_snake_case = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
_snake_case = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
_snake_case = 'A, <mask> AllenNLP sentence.'
_snake_case = tokenizer_r.encode_plus(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ )
_snake_case = tokenizer_p.encode_plus(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ )
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
_snake_case = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
_snake_case = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
            self.assertSequenceEqual(tokens_p['input_ids'] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
            self.assertSequenceEqual(tokens_r['input_ids'] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
lowerCAmelCase_ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
lowerCAmelCase_ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
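    # Illustrative sketch (not part of the test suite): the padding behaviour
    # checked above, outside of unittest. LED pads `global_attention_mask` with
    # -1 so that padded positions stay distinguishable from real "local
    # attention" zeros. This helper is hypothetical, not taken from the source.
    def _demo_global_attention_padding(self):
        tokenizer = self.default_tokenizer
        encoded = tokenizer(['Summary of the text.', 'Another summary.'] )
        encoded['global_attention_mask'] = [[0] * len(ids ) for ids in encoded['input_ids']]
        return tokenizer.pad(encoded )['global_attention_mask']  # shorter row ends in -1s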
| 295
| 1
|
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
UpperCAmelCase_ = datasets.utils.logging.get_logger(__name__)
class AudioFolderConfig ( folder_based_builder.FolderBasedBuilderConfig ):
lowerCAmelCase_ = None
lowerCAmelCase_ = None
class UpperCamelCase_ ( folder_based_builder.FolderBasedBuilder ):
lowerCAmelCase_ = datasets.Audio()
lowerCAmelCase_ = '''audio'''
lowerCAmelCase_ = AudioFolderConfig
lowerCAmelCase_ = 42 # definition at the bottom of the script
lowerCAmelCase_ = AudioClassification(audio_column='''audio''' , label_column='''label''' )
AUDIO_EXTENSIONS = [
""".aiff""",
""".au""",
""".avr""",
""".caf""",
""".flac""",
""".htk""",
""".svx""",
""".mat4""",
""".mat5""",
""".mpc2k""",
""".ogg""",
""".paf""",
""".pvf""",
""".raw""",
""".rf64""",
""".sd2""",
""".sds""",
""".ircam""",
""".voc""",
""".w64""",
""".wav""",
""".nist""",
""".wavex""",
""".wve""",
""".xi""",
""".mp3""",
""".opus""",
]
UpperCAmelCase_ = AUDIO_EXTENSIONS
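# Illustrative usage (assumption: a local directory laid out as
# data_dir/<class_name>/<clip>.wav). The "audiofolder" loader built on this
# module infers class labels from directory names and decodes any of the
# extensions listed above:
#
#     from datasets import load_dataset
#     dataset = load_dataset("audiofolder", data_dir="/path/to/audio_folder")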
| 295
|
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class UpperCamelCase_ ( _lowerCamelCase , unittest.TestCase ):
lowerCAmelCase_ = BertTokenizer
lowerCAmelCase_ = BertTokenizerFast
lowerCAmelCase_ = True
lowerCAmelCase_ = True
lowerCAmelCase_ = filter_non_english
def lowerCAmelCase ( self ) -> Optional[int]:
super().setUp()
_snake_case = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
_snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Optional[Any]:
_snake_case = 'UNwant\u00E9d,running'
_snake_case = 'unwanted, running'
return input_text, output_text
def lowerCAmelCase ( self ) -> List[Any]:
_snake_case = self.tokenizer_class(self.vocab_file )
_snake_case = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(lowerCAmelCase_ , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [9, 6, 7, 12, 10, 11] )
def lowerCAmelCase ( self ) -> Tuple:
if not self.test_rust_tokenizer:
return
_snake_case = self.get_tokenizer()
_snake_case = self.get_rust_tokenizer()
_snake_case = 'UNwant\u00E9d,running'
_snake_case = tokenizer.tokenize(lowerCAmelCase_ )
_snake_case = rust_tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
_snake_case = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = self.get_rust_tokenizer()
_snake_case = tokenizer.encode(lowerCAmelCase_ )
_snake_case = rust_tokenizer.encode(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
# With lower casing
_snake_case = self.get_tokenizer(do_lower_case=lowerCAmelCase_ )
_snake_case = self.get_rust_tokenizer(do_lower_case=lowerCAmelCase_ )
_snake_case = 'UNwant\u00E9d,running'
_snake_case = tokenizer.tokenize(lowerCAmelCase_ )
_snake_case = rust_tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
_snake_case = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = self.get_rust_tokenizer()
_snake_case = tokenizer.encode(lowerCAmelCase_ )
_snake_case = rust_tokenizer.encode(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> List[str]:
_snake_case = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def lowerCAmelCase ( self ) -> Optional[Any]:
_snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def lowerCAmelCase ( self ) -> List[Any]:
_snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def lowerCAmelCase ( self ) -> Any:
_snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def lowerCAmelCase ( self ) -> List[str]:
_snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def lowerCAmelCase ( self ) -> Tuple:
_snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def lowerCAmelCase ( self ) -> Union[str, Any]:
_snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def lowerCAmelCase ( self ) -> Tuple:
_snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def lowerCAmelCase ( self ) -> Dict:
_snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def lowerCAmelCase ( self ) -> Union[str, Any]:
_snake_case = BasicTokenizer()
_snake_case = 'a\n\'ll !!to?\'d of, can\'t.'
_snake_case = ['a', '\'', 'll', '!', '!', 'to', '?', '\'', 'd', 'of', ',', 'can', '\'', 't', '.']
self.assertListEqual(tokenizer.tokenize(lowerCAmelCase_ ) , lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> Union[str, Any]:
_snake_case = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
_snake_case = {}
for i, token in enumerate(lowerCAmelCase_ ):
_snake_case = i
_snake_case = WordpieceTokenizer(vocab=lowerCAmelCase_ , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
def lowerCAmelCase ( self ) -> Tuple:
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def lowerCAmelCase ( self ) -> Dict:
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def lowerCAmelCase ( self ) -> int:
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def lowerCAmelCase ( self ) -> Tuple:
_snake_case = self.get_tokenizer()
_snake_case = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
        self.assertListEqual(
            [rust_tokenizer.tokenize(t ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
@slow
def lowerCAmelCase ( self ) -> Optional[Any]:
_snake_case = self.tokenizer_class.from_pretrained('bert-base-uncased' )
_snake_case = tokenizer.encode('sequence builders' , add_special_tokens=lowerCAmelCase_ )
_snake_case = tokenizer.encode('multi-sequence build' , add_special_tokens=lowerCAmelCase_ )
_snake_case = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ )
_snake_case = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ , lowerCAmelCase_ )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def lowerCAmelCase ( self ) -> Union[str, Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_snake_case = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
_snake_case = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
_snake_case = tokenizer_r.encode_plus(
lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , )
_snake_case = tokenizer_r.do_lower_case if hasattr(lowerCAmelCase_ , 'do_lower_case' ) else False
_snake_case = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
def lowerCAmelCase ( self ) -> str:
_snake_case = ['的', '人', '有']
_snake_case = ''.join(lowerCAmelCase_ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_snake_case = True
_snake_case = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
_snake_case = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
_snake_case = tokenizer_p.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
_snake_case = tokenizer_r.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
_snake_case = tokenizer_r.convert_ids_to_tokens(lowerCAmelCase_ )
_snake_case = tokenizer_p.convert_ids_to_tokens(lowerCAmelCase_ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = False
_snake_case = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
_snake_case = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
_snake_case = tokenizer_r.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
_snake_case = tokenizer_p.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
_snake_case = tokenizer_r.convert_ids_to_tokens(lowerCAmelCase_ )
_snake_case = tokenizer_p.convert_ids_to_tokens(lowerCAmelCase_ )
# it is expected that only the first Chinese character is not preceded by "##".
_snake_case = [
F'''##{token}''' if idx != 0 else token for idx, token in enumerate(lowerCAmelCase_ )
]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
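# Illustrative sketch (not part of the test suite): the greedy
# longest-match-first algorithm that WordpieceTokenizer implements, written out
# standalone. With the toy vocab used above it reproduces the expected splits:
# "unwanted" -> ['un', '##want', '##ed'], and a word containing an unknown
# piece collapses to ['[UNK]']. This helper is hypothetical, not from the source.
def _greedy_wordpiece(word, vocab, unk_token='[UNK]'):
    tokens, start = [], 0
    while start < len(word):
        end, current = len(word), None
        while start < end:  # shrink the window until a vocab entry matches
            piece = word[start:end] if start == 0 else '##' + word[start:end]
            if piece in vocab:
                current = piece
                break
            end -= 1
        if current is None:  # no piece matched: the whole word becomes [UNK]
            return [unk_token]
        tokens.append(current)
        start = end
    return tokens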
| 295
| 1
|
from __future__ import annotations
from typing import Any
class ContainsLoopError(Exception):
    pass


class Node:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.next_node: Node | None = None

    def __iter__(self) -> Any:
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True
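# Illustrative alternative (not part of the original module): the visited-list
# approach in __iter__ costs O(n) extra memory and O(n^2) time because of the
# membership scan. Floyd's tortoise-and-hare detects the same loops in O(1)
# space and O(n) time; `has_loop_floyd` is a hypothetical helper.
def has_loop_floyd(head: Node | None) -> bool:
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node  # one step
        fast = fast.next_node.next_node  # two steps
        if slow is fast:  # the pointers can only meet inside a cycle
            return True
    return False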
if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop) # False
    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop) # True

    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop) # False

    root_node = Node(1)
    print(root_node.has_loop) # False
| 295
|
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
UpperCAmelCase_ = logging.get_logger(__name__)
class UpperCamelCase_ ( _lowerCamelCase ):
def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> None:
        warnings.warn(
            'The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use FlavaImageProcessor instead.' , FutureWarning , )
super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ )
| 295
| 1
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
UpperCAmelCase_ = {
"""vocab_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/vocab.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/vocab.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/vocab.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/vocab.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/vocab.json""",
},
"""merges_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/merges.txt""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/merges.txt""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/merges.txt""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/merges.txt""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/tokenizer.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/tokenizer.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/tokenizer.json""",
},
}
UpperCAmelCase_ = {
"""gpt2""": 1024,
"""gpt2-medium""": 1024,
"""gpt2-large""": 1024,
"""gpt2-xl""": 1024,
"""distilgpt2""": 1024,
}
class UpperCamelCase_ ( _lowerCamelCase ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ['''input_ids''', '''attention_mask''']
lowerCAmelCase_ = GPTaTokenizer
def __init__( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_="<|endoftext|>" , lowerCAmelCase_="<|endoftext|>" , lowerCAmelCase_="<|endoftext|>" , lowerCAmelCase_=False , **lowerCAmelCase_ , ) -> str:
super().__init__(
lowerCAmelCase_ , lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ , **lowerCAmelCase_ , )
_snake_case = kwargs.pop('add_bos_token' , lowerCAmelCase_ )
_snake_case = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , lowerCAmelCase_ ) != add_prefix_space:
_snake_case = getattr(lowerCAmelCase_ , pre_tok_state.pop('type' ) )
_snake_case = add_prefix_space
_snake_case = pre_tok_class(**lowerCAmelCase_ )
_snake_case = add_prefix_space
def lowerCAmelCase ( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> BatchEncoding:
_snake_case = kwargs.get('is_split_into_words' , lowerCAmelCase_ )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*lowerCAmelCase_ , **lowerCAmelCase_ )
def lowerCAmelCase ( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> BatchEncoding:
_snake_case = kwargs.get('is_split_into_words' , lowerCAmelCase_ )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*lowerCAmelCase_ , **lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> Tuple[str]:
_snake_case = self._tokenizer.model.save(lowerCAmelCase_ , name=lowerCAmelCase_ )
return tuple(lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> List[int]:
_snake_case = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) + [self.eos_token_id] )
if len(lowerCAmelCase_ ) > self.model_max_length:
_snake_case = input_ids[-self.model_max_length :]
return input_ids
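# Illustrative usage (assumption: the standard "gpt2" checkpoint). The
# assertions in _batch_encode_plus/_encode_plus above mean that pretokenized
# input requires add_prefix_space=True, because the byte-level BPE folds the
# leading space into the first token of each word:
#
#     from transformers import GPT2TokenizerFast
#     tok = GPT2TokenizerFast.from_pretrained("gpt2", add_prefix_space=True)
#     enc = tok(["Hello", "world"], is_split_into_words=True)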
| 295
|
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
UpperCAmelCase_ = [
{"""dataset""": """wikipedia""", """config_name""": """20220301.de"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.en"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.fr"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.frr"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.it"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.simple"""},
{"""dataset""": """snli""", """config_name""": """plain_text"""},
{"""dataset""": """eli5""", """config_name""": """LFQA_reddit"""},
{"""dataset""": """wiki40b""", """config_name""": """en"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.compressed"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.no_index"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.multiset.no_index"""},
{"""dataset""": """natural_questions""", """config_name""": """default"""},
]
def lowerCamelCase__ ( UpperCamelCase__ : Dict=True ) -> Dict:
'''simple docstring'''
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=_lowerCamelCase ) )
class UpperCamelCase_ ( _lowerCamelCase ):
lowerCAmelCase_ = None
lowerCAmelCase_ = None
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Union[str, Any]:
with TemporaryDirectory() as tmp_dir:
_snake_case = dataset_module_factory(lowerCAmelCase_ , cache_dir=lowerCAmelCase_ )
_snake_case = import_main_class(dataset_module.module_path , dataset=lowerCAmelCase_ )
_snake_case = builder_cls(
cache_dir=lowerCAmelCase_ , config_name=lowerCAmelCase_ , hash=dataset_module.hash , )
_snake_case = '/'.join(
[
HF_GCP_BASE_URL,
builder_instance._relative_data_dir(with_hash=lowerCAmelCase_ ).replace(os.sep , '/' ),
config.DATASET_INFO_FILENAME,
] )
_snake_case = cached_path(lowerCAmelCase_ , cache_dir=lowerCAmelCase_ )
self.assertTrue(os.path.exists(lowerCAmelCase_ ) )
@pytest.mark.integration
def lowerCamelCase__ ( UpperCamelCase__ : Any ) -> Tuple:
'''simple docstring'''
_snake_case = tmp_path_factory.mktemp('test_hf_gcp' ) / 'test_wikipedia_simple'
_snake_case = dataset_module_factory('wikipedia' , cache_dir=UpperCamelCase__ )
_snake_case = import_main_class(dataset_module.module_path )
_snake_case = builder_cls(
cache_dir=UpperCamelCase__ , config_name='20220301.frr' , hash=dataset_module.hash , )
# use the HF cloud storage, not the original download_and_prepare that uses apache-beam
_snake_case = None
builder_instance.download_and_prepare()
_snake_case = builder_instance.as_dataset()
assert ds
@pytest.mark.integration
def lowerCamelCase__ ( UpperCamelCase__ : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
_snake_case = dataset_module_factory('wikipedia' , cache_dir=UpperCamelCase__ )
_snake_case = import_main_class(dataset_module.module_path , dataset=UpperCamelCase__ )
_snake_case = builder_cls(
cache_dir=UpperCamelCase__ , config_name='20220301.frr' , hash=dataset_module.hash , )
_snake_case = builder_instance.as_streaming_dataset()
assert ds
assert isinstance(UpperCamelCase__ , UpperCamelCase__ )
assert "train" in ds
assert isinstance(ds['train'] , UpperCamelCase__ )
assert next(iter(ds['train'] ) )
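# Illustrative sketch (not part of the tests): the same streaming path through
# the public API. The dataset and config names are taken from the tests above;
# whether a given config can be streamed depends on how it was prepared.
#
#     from datasets import load_dataset
#     ds = load_dataset("wikipedia", "20220301.frr", split="train", streaming=True)
#     print(next(iter(ds)))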
| 295
| 1
|
from __future__ import annotations
def rec_insertion_sort(collection: list, n: int) -> None:
    '''simple docstring'''
    if len(collection) <= 1 or n <= 1:
        return
    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int) -> None:
    '''simple docstring'''
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return
    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection, index + 1)


if __name__ == "__main__":
    numbers = input("""Enter integers separated by spaces: """)
    number_list = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
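# Illustrative usage (not in the original script): the sort mutates its
# argument in place and returns None.
# >>> data = [3, 1, 2]
# >>> rec_insertion_sort(data, len(data))
# >>> data
# [1, 2, 3]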
| 295
|
def one_pence() -> int:
    '''simple docstring'''
    return 1
def two_pence(x: int) -> int:
    '''simple docstring'''
    return 0 if x < 0 else two_pence(x - 2 ) + one_pence()
def five_pence(x: int) -> int:
    '''simple docstring'''
    return 0 if x < 0 else five_pence(x - 5 ) + two_pence(x )
def ten_pence(x: int) -> int:
    '''simple docstring'''
    return 0 if x < 0 else ten_pence(x - 10 ) + five_pence(x )
def twenty_pence(x: int) -> int:
    '''simple docstring'''
    return 0 if x < 0 else twenty_pence(x - 20 ) + ten_pence(x )
def fifty_pence(x: int) -> int:
    '''simple docstring'''
    return 0 if x < 0 else fifty_pence(x - 50 ) + twenty_pence(x )
def one_pound(x: int) -> int:
    '''simple docstring'''
    return 0 if x < 0 else one_pound(x - 100 ) + fifty_pence(x )
def two_pound(x: int) -> int:
    '''simple docstring'''
    return 0 if x < 0 else two_pound(x - 200 ) + one_pound(x )
def solution(x: int = 200) -> int:
    '''simple docstring'''
    return two_pound(x )
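# Illustrative alternative (not from the original script): the chain of
# mutually recursive functions above recomputes many sub-counts. A bottom-up
# table gives the same answer in O(len(coins) * n) time; `solution_dp` is a
# hypothetical helper. For the default n = 200 both approaches return 73682.
def solution_dp(pence: int = 200) -> int:
    coins = (1, 2, 5, 10, 20, 50, 100, 200)
    ways = [0] * (pence + 1)
    ways[0] = 1  # one way to make 0: use no coins
    for coin in coins:
        for amount in range(coin, pence + 1):
            ways[amount] += ways[amount - coin]
    return ways[pence]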
if __name__ == "__main__":
print(solution(int(input().strip())))
| 295
| 1
|
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class UpperCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
lowerCAmelCase_ = 1
@register_to_config
def __init__( self , lowerCAmelCase_ = 1000 , lowerCAmelCase_ = None ) -> Optional[int]:
# set `betas`, `alphas`, `timesteps`
self.set_timesteps(lowerCAmelCase_ )
# standard deviation of the initial noise distribution
_snake_case = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
_snake_case = 4
# running values
_snake_case = []
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> List[str]:
_snake_case = num_inference_steps
_snake_case = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
_snake_case = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
            _snake_case = torch.tensor(self.config.trained_betas , dtype=torch.float32 )
else:
_snake_case = torch.sin(steps * math.pi / 2 ) ** 2
_snake_case = (1.0 - self.betas**2) ** 0.5
        _snake_case = (torch.atan2(self.betas , self.alphas ) / math.pi * 2)[:-1]
_snake_case = timesteps.to(lowerCAmelCase_ )
_snake_case = []
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = True , ) -> Union[SchedulerOutput, Tuple]:
if self.num_inference_steps is None:
raise ValueError(
'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler' )
_snake_case = (self.timesteps == timestep).nonzero().item()
_snake_case = timestep_index + 1
_snake_case = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(lowerCAmelCase_ )
if len(self.ets ) == 1:
_snake_case = self.ets[-1]
elif len(self.ets ) == 2:
_snake_case = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
_snake_case = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
_snake_case = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
_snake_case = self._get_prev_sample(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> torch.FloatTensor:
return sample
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Any:
_snake_case = self.alphas[timestep_index]
_snake_case = self.betas[timestep_index]
_snake_case = self.alphas[prev_timestep_index]
_snake_case = self.betas[prev_timestep_index]
_snake_case = (sample - sigma * ets) / max(lowerCAmelCase_ , 1E-8 )
_snake_case = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self ) -> Optional[Any]:
return self.config.num_train_timesteps
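# Illustrative sketch (not part of the module): the generic denoising loop this
# scheduler plugs into. `model` is a hypothetical diffusers-style network whose
# forward returns an object with a `.sample` attribute; in diffusers this
# scheduler is exposed as IPNDMScheduler.
#
#     scheduler = IPNDMScheduler(num_train_timesteps=1000)
#     scheduler.set_timesteps(50)
#     sample = torch.randn(1, 3, 64, 64)  # init_noise_sigma is 1.0 here
#     for t in scheduler.timesteps:
#         model_output = model(sample, t).sample
#         sample = scheduler.step(model_output, t, sample).prev_sample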
| 295
|
def binomial_coefficient(n: int, r: int) -> int:
    '''simple docstring'''
    c = [0 for i in range(r + 1 )]
    # nc0 = 1
    c[0] = 1
    for i in range(1 , n + 1 ):
        # to compute current row from previous row.
        j = min(i , r )
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=10, r=5))
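# Sanity check (illustrative): the row-reuse trick above matches the closed
# form; C(10, 5) = 252, and math.comb agrees for any valid (n, r).
# >>> import math
# >>> binomial_coefficient(10, 5) == math.comb(10, 5) == 252
# True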
| 295
| 1
|
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class UpperCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
@register_to_config
def __init__( self , lowerCAmelCase_ = 128 , lowerCAmelCase_ = 256 , lowerCAmelCase_ = 20_00.0 , lowerCAmelCase_ = 768 , lowerCAmelCase_ = 12 , lowerCAmelCase_ = 12 , lowerCAmelCase_ = 64 , lowerCAmelCase_ = 2048 , lowerCAmelCase_ = 0.1 , ) -> Union[str, Any]:
super().__init__()
_snake_case = nn.Sequential(
nn.Linear(lowerCAmelCase_ , d_model * 4 , bias=lowerCAmelCase_ ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=lowerCAmelCase_ ) , nn.SiLU() , )
_snake_case = nn.Embedding(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = False
_snake_case = nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ )
_snake_case = nn.Dropout(p=lowerCAmelCase_ )
_snake_case = nn.ModuleList()
for lyr_num in range(lowerCAmelCase_ ):
# FiLM conditional T5 decoder
_snake_case = DecoderLayer(d_model=lowerCAmelCase_ , d_kv=lowerCAmelCase_ , num_heads=lowerCAmelCase_ , d_ff=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ )
self.decoders.append(lowerCAmelCase_ )
_snake_case = TaLayerNorm(lowerCAmelCase_ )
_snake_case = nn.Dropout(p=lowerCAmelCase_ )
_snake_case = nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Tuple:
_snake_case = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
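    # The mask above is a batched outer product: mask[b, i, j] = query_input[b, i] *
    # key_input[b, j], so a decoder position i may attend to encoder position j only when
    # both are non-padding; unsqueeze(-3) adds the broadcastable attention-head axis.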
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[Any]:
_snake_case , _snake_case , _snake_case = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
_snake_case = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
_snake_case = self.conditioning_emb(lowerCAmelCase_ ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
_snake_case = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
_snake_case = torch.broadcast_to(
torch.arange(lowerCAmelCase_ , device=decoder_input_tokens.device ) , (batch, seq_length) , )
_snake_case = self.position_encoding(lowerCAmelCase_ )
_snake_case = self.continuous_inputs_projection(lowerCAmelCase_ )
inputs += position_encodings
_snake_case = self.dropout(lowerCAmelCase_ )
# decoder: No padding present.
_snake_case = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
_snake_case = [(x, self.encoder_decoder_mask(lowerCAmelCase_ , lowerCAmelCase_ )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
_snake_case = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
_snake_case = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
_snake_case = lyr(
lowerCAmelCase_ , conditioning_emb=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , encoder_attention_mask=lowerCAmelCase_ , )[0]
_snake_case = self.decoder_norm(lowerCAmelCase_ )
_snake_case = self.post_dropout(lowerCAmelCase_ )
_snake_case = self.spec_out(lowerCAmelCase_ )
return spec_out
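# Forward-pass summary for the decoder above: the scalar noise time is embedded and
# projected into a FiLM conditioning vector, the continuous inputs are projected and given
# absolute position encodings, all encoder streams are concatenated behind a shared
# encoder-decoder mask, and a stack of FiLM-conditioned T5 decoder layers feeds the final
# norm, dropout and spectrogram projection.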
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=1E-6 ) -> Tuple:
super().__init__()
_snake_case = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=lowerCAmelCase_ , d_kv=lowerCAmelCase_ , num_heads=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=lowerCAmelCase_ , d_kv=lowerCAmelCase_ , num_heads=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ , layer_norm_epsilon=lowerCAmelCase_ , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=lowerCAmelCase_ , d_ff=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ , layer_norm_epsilon=lowerCAmelCase_ ) )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , ) -> Tuple:
_snake_case = self.layer[0](
lowerCAmelCase_ , conditioning_emb=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , )
if encoder_hidden_states is not None:
_snake_case = torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to(
encoder_hidden_states.dtype )
_snake_case = self.layer[1](
lowerCAmelCase_ , key_value_states=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , )
# Apply Film Conditional Feed Forward layer
_snake_case = self.layer[-1](lowerCAmelCase_ , lowerCAmelCase_ )
return (hidden_states,)
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Tuple:
super().__init__()
_snake_case = TaLayerNorm(lowerCAmelCase_ )
_snake_case = TaFiLMLayer(in_features=d_model * 4 , out_features=lowerCAmelCase_ )
_snake_case = Attention(query_dim=lowerCAmelCase_ , heads=lowerCAmelCase_ , dim_head=lowerCAmelCase_ , out_bias=lowerCAmelCase_ , scale_qk=lowerCAmelCase_ )
_snake_case = nn.Dropout(lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , ) -> str:
# pre_self_attention_layer_norm
_snake_case = self.layer_norm(lowerCAmelCase_ )
if conditioning_emb is not None:
_snake_case = self.FiLMLayer(lowerCAmelCase_ , lowerCAmelCase_ )
# Self-attention block
_snake_case = self.attention(lowerCAmelCase_ )
_snake_case = hidden_states + self.dropout(lowerCAmelCase_ )
return hidden_states
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> str:
super().__init__()
_snake_case = Attention(query_dim=lowerCAmelCase_ , heads=lowerCAmelCase_ , dim_head=lowerCAmelCase_ , out_bias=lowerCAmelCase_ , scale_qk=lowerCAmelCase_ )
_snake_case = TaLayerNorm(lowerCAmelCase_ , eps=lowerCAmelCase_ )
_snake_case = nn.Dropout(lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , ) -> Dict:
_snake_case = self.layer_norm(lowerCAmelCase_ )
_snake_case = self.attention(
lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , attention_mask=attention_mask.squeeze(1 ) , )
_snake_case = hidden_states + self.dropout(lowerCAmelCase_ )
return layer_output
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[str]:
super().__init__()
_snake_case = TaDenseGatedActDense(d_model=lowerCAmelCase_ , d_ff=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ )
_snake_case = TaFiLMLayer(in_features=d_model * 4 , out_features=lowerCAmelCase_ )
_snake_case = TaLayerNorm(lowerCAmelCase_ , eps=lowerCAmelCase_ )
_snake_case = nn.Dropout(lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=None ) -> Union[str, Any]:
_snake_case = self.layer_norm(lowerCAmelCase_ )
if conditioning_emb is not None:
_snake_case = self.film(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = self.DenseReluDense(lowerCAmelCase_ )
_snake_case = hidden_states + self.dropout(lowerCAmelCase_ )
return hidden_states
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Union[str, Any]:
super().__init__()
_snake_case = nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ )
_snake_case = nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ )
_snake_case = nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ )
_snake_case = nn.Dropout(lowerCAmelCase_ )
_snake_case = NewGELUActivation()
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Any:
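        # Gated GELU feed-forward: two separate input projections (wi_0 / wi_1 in the
        # original T5 naming) produce a GELU-activated gate and a linear branch, which are
        # multiplied elementwise before dropout and the output projection.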
_snake_case = self.act(self.wi_a(lowerCAmelCase_ ) )
_snake_case = self.wi_a(lowerCAmelCase_ )
_snake_case = hidden_gelu * hidden_linear
_snake_case = self.dropout(lowerCAmelCase_ )
_snake_case = self.wo(lowerCAmelCase_ )
return hidden_states
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=1E-6 ) -> str:
super().__init__()
_snake_case = nn.Parameter(torch.ones(lowerCAmelCase_ ) )
_snake_case = eps
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> int:
        # T5 uses a layer norm that only scales and does not shift, also known as Root Mean
        # Square Layer Normalization (https://arxiv.org/abs/1910.07467): the variance is
        # computed without subtracting the mean and there is no bias. We also make sure that
        # the accumulation for half-precision inputs is done in fp32.
_snake_case = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=lowerCAmelCase_ )
_snake_case = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
_snake_case = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
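# In equation form, the layer above computes RMSNorm: y = w * x / sqrt(mean(x^2) + eps),
# i.e. scaling without centering. A minimal reference implementation for comparison
# (hypothetical, not part of the original file; it ignores the half-precision round-trip):
def _rmsnorm_reference(hidden_states , weight , eps=1E-6 ):
    # accumulate in fp32, mirroring the layer above
    variance = hidden_states.to(torch.float32 ).pow(2 ).mean(-1 , keepdim=True )
    return weight * (hidden_states * torch.rsqrt(variance + eps ))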
class UpperCamelCase_ ( nn.Module ):
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> torch.Tensor:
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.04_47_15 * torch.pow(lowerCAmelCase_ , 3.0 )) ))
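# The activation above is the tanh approximation of GELU used by Google BERT / T5:
# 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3))). For large positive x it
# approaches x and for large negative x it approaches 0, closely matching the exact
# erf-based GELU.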
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Any:
super().__init__()
_snake_case = nn.Linear(lowerCAmelCase_ , out_features * 2 , bias=lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[Any]:
_snake_case = self.scale_bias(lowerCAmelCase_ )
_snake_case , _snake_case = torch.chunk(lowerCAmelCase_ , 2 , -1 )
_snake_case = x * (1 + scale) + shift
return x
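# FiLM (feature-wise linear modulation): the conditioning embedding is projected to a
# per-feature (scale, shift) pair and applied as x * (1 + scale) + shift, so a zero
# projection output leaves x unchanged. Minimal usage sketch (sizes are hypothetical,
# class name as referenced in the layers above):
#
#   film = TaFiLMLayer(in_features=8 , out_features=4 )
#   y = film(torch.randn(2 , 10 , 4 ) , torch.randn(2 , 1 , 8 ) )  # y keeps x's shape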
| 295
| 1
|
import os
import numpy
import onnx
def lowerCamelCase__ ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[str] ) -> Tuple:
'''simple docstring'''
_snake_case = a.name
_snake_case = b.name
_snake_case = ''
_snake_case = ''
_snake_case = a == b
_snake_case = name_a
_snake_case = name_b
return res
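# The equality check above deliberately blanks both tensor names before evaluating
# `a == b`, so two initializers compare equal when their dtype, dims and raw data match
# even if their graph names differ; the original names are restored before returning.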
def lowerCamelCase__ ( UpperCamelCase__ : str , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] ) -> Tuple:
'''simple docstring'''
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(UpperCamelCase__ , UpperCamelCase__ )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , UpperCamelCase__ , UpperCamelCase__ )
_graph_replace_input_with(node_proto.attribute[1].g , UpperCamelCase__ , UpperCamelCase__ )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase__ ( UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : int ) -> List[str]:
'''simple docstring'''
for n in graph_proto.node:
_node_replace_input_with(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase__ ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any ) -> Optional[Any]:
'''simple docstring'''
_snake_case = list(model.graph.initializer )
_snake_case = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
_snake_case = inits[i].name
_snake_case = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase__ ( UpperCamelCase__ : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
_snake_case = os.path.dirname(UpperCamelCase__ )
_snake_case = os.path.basename(UpperCamelCase__ )
_snake_case = onnx.load(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) )
_snake_case = list(model.graph.initializer )
_snake_case = set()
_snake_case = {}
_snake_case = []
_snake_case = 0
for i in range(len(UpperCamelCase__ ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(UpperCamelCase__ ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(UpperCamelCase__ )
dup_set.add(UpperCamelCase__ )
_snake_case = inits[j].data_type
_snake_case = numpy.prod(inits[j].dims )
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 11:
mem_size *= 8
else:
print('unexpected data type: ' , UpperCamelCase__ )
total_reduced_size += mem_size
_snake_case = inits[i].name
_snake_case = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(UpperCamelCase__ )
else:
_snake_case = [name_j]
ind_to_replace.append((j, i) )
print('total reduced size: ' , total_reduced_size / 1_024 / 1_024 / 1_024 , 'GB' )
_snake_case = sorted(UpperCamelCase__ )
_remove_dup_initializers_from_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
_snake_case = 'optimized_' + model_file_name
_snake_case = os.path.join(UpperCamelCase__ , UpperCamelCase__ )
onnx.save(UpperCamelCase__ , UpperCamelCase__ )
return new_model
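# Hypothetical usage sketch (the entry-point name is an assumption; helpers referenced in
# the body such as `_remove_dup_initializers_from_model` keep their original names):
#
#   optimized_path = remove_dup_initializers('/tmp/model.onnx' )
#
# The entry point loads the model, groups initializers with identical dtype, dims and raw
# data, rewires every consumer (including `If`/`Loop` subgraphs) to a single surviving
# tensor, reports the bytes saved, and writes 'optimized_<file>' next to the input.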
| 295
|
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"""EleutherAI/gpt-neo-1.3B""": """https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json""",
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class UpperCamelCase_ ( _lowerCamelCase ):
lowerCAmelCase_ = '''gpt_neo'''
lowerCAmelCase_ = ['''past_key_values''']
lowerCAmelCase_ = {'''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
def __init__( self , lowerCAmelCase_=5_0257 , lowerCAmelCase_=2048 , lowerCAmelCase_=2048 , lowerCAmelCase_=24 , lowerCAmelCase_=[[["global", "local"], 12]] , lowerCAmelCase_=16 , lowerCAmelCase_=None , lowerCAmelCase_=256 , lowerCAmelCase_="gelu_new" , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.1 , lowerCAmelCase_=1E-5 , lowerCAmelCase_=0.02 , lowerCAmelCase_=True , lowerCAmelCase_=5_0256 , lowerCAmelCase_=5_0256 , **lowerCAmelCase_ , ) -> Tuple:
_snake_case = vocab_size
_snake_case = max_position_embeddings
_snake_case = hidden_size
_snake_case = num_layers
_snake_case = num_heads
_snake_case = intermediate_size
_snake_case = window_size
_snake_case = activation_function
_snake_case = resid_dropout
_snake_case = embed_dropout
_snake_case = attention_dropout
_snake_case = classifier_dropout
_snake_case = layer_norm_epsilon
_snake_case = initializer_range
_snake_case = use_cache
_snake_case = bos_token_id
_snake_case = eos_token_id
_snake_case = attention_types
_snake_case = self.expand_attention_types_params(lowerCAmelCase_ )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.attention_layers)` == `config.num_layers` '
F'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, '''
F'''`config.num_layers = {self.num_layers}`. '''
'`config.attention_layers` is prepared using `config.attention_types`. '
'Please verify the value of `config.attention_types` argument.' )
super().__init__(bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , **lowerCAmelCase_ )
@staticmethod
def lowerCAmelCase ( lowerCAmelCase_ ) -> Any:
_snake_case = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
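    # Example: the default attention_types=[[["global", "local"], 12]] expands to the
    # 24-entry list ["global", "local", "global", "local", ...], so layers alternate
    # between global and local attention and len(attention_layers) == num_layers.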
def lowerCamelCase__ ( UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[int] ) -> Any:
'''simple docstring'''
import torch
_snake_case = input.size()
_snake_case = len(UpperCamelCase__ )
_snake_case = shape[dimension]
_snake_case = torch.arange(0 , UpperCamelCase__ , UpperCamelCase__ )
_snake_case = torch.div(sizedim - size , UpperCamelCase__ , rounding_mode='floor' ) + 1
_snake_case = torch.arange(UpperCamelCase__ ) + low_indices[:min_length][:, None]
_snake_case = [slice(UpperCamelCase__ )] * rank
_snake_case = indices
_snake_case = input[s]
_snake_case = list(range(0 , rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(UpperCamelCase__ )
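# The helper above emulates torch.Tensor.unfold: it gathers windows of length `size`
# every `step` elements along `dimension` via advanced indexing, then permutes the
# window axis to the end, e.g. a (batch, seq) tensor becomes (batch, n_windows, size).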
def lowerCamelCase__ ( UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict ) -> str:
'''simple docstring'''
import torch
_snake_case = torch.arange(1 , UpperCamelCase__ )
_snake_case = torch.remainder(UpperCamelCase__ , UpperCamelCase__ )
_snake_case = remainders == 0
_snake_case = candidates[divisor_indices]
_snake_case = torch.max(UpperCamelCase__ )
return largest_divisor, torch.div(UpperCamelCase__ , UpperCamelCase__ , rounding_mode='floor' )
class UpperCamelCase_ ( _lowerCamelCase ):
@property
def lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
_snake_case = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase_ , direction='inputs' )
_snake_case = {0: 'batch', 1: 'past_sequence + sequence'}
else:
_snake_case = {0: 'batch', 1: 'sequence'}
return common_inputs
@property
def lowerCAmelCase ( self ) -> int:
return self._config.num_heads
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = -1 , lowerCAmelCase_ = -1 , lowerCAmelCase_ = False , lowerCAmelCase_ = None , ) -> Mapping[str, Any]:
_snake_case = super(lowerCAmelCase_ , self ).generate_dummy_inputs(
lowerCAmelCase_ , batch_size=lowerCAmelCase_ , seq_length=lowerCAmelCase_ , is_pair=lowerCAmelCase_ , framework=lowerCAmelCase_ )
        # We need to order the inputs in the way they appear in the forward()
_snake_case = OrderedDict({'input_ids': common_inputs['input_ids']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
_snake_case , _snake_case = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
_snake_case = seqlen + 2
_snake_case = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
_snake_case = [
(torch.zeros(lowerCAmelCase_ ), torch.zeros(lowerCAmelCase_ )) for _ in range(self.num_layers )
]
_snake_case = common_inputs['attention_mask']
if self.use_past:
_snake_case = ordered_inputs['attention_mask'].dtype
_snake_case = torch.cat(
[ordered_inputs['attention_mask'], torch.ones(lowerCAmelCase_ , lowerCAmelCase_ , dtype=lowerCAmelCase_ )] , dim=1 )
return ordered_inputs
@property
def lowerCAmelCase ( self ) -> int:
return 13
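# For reference, each generated past_key_values entry above is a (key, value) pair of
# shape (batch, num_heads, seqlen + 2, hidden_size // num_heads), and the attention
# mask is lengthened by the same past length so it covers past and current tokens.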
| 295
| 1
|
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def lowerCamelCase__ ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[Any]=False ) -> Tuple:
'''simple docstring'''
_snake_case = OmegaConf.load(UpperCamelCase__ )
if display:
print(yaml.dump(OmegaConf.to_container(UpperCamelCase__ ) ) )
return config
def lowerCamelCase__ ( UpperCamelCase__ : Any , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : Optional[int]=None ) -> Dict:
'''simple docstring'''
if conf_path is None:
_snake_case = './model_checkpoints/vqgan_only.yaml'
_snake_case = load_config(UpperCamelCase__ , display=UpperCamelCase__ )
_snake_case = VQModel(**config.model.params )
if ckpt_path is None:
_snake_case = './model_checkpoints/vqgan_only.pt'
_snake_case = torch.load(UpperCamelCase__ , map_location=UpperCamelCase__ )
if ".ckpt" in ckpt_path:
_snake_case = sd['state_dict']
model.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ )
model.to(UpperCamelCase__ )
del sd
return model
def lowerCamelCase__ ( UpperCamelCase__ : int , UpperCamelCase__ : List[str] ) -> List[Any]:
'''simple docstring'''
_snake_case , _snake_case , _snake_case = model.encode(UpperCamelCase__ )
print(F'''VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}''' )
_snake_case = model.decode(UpperCamelCase__ )
return xrec
def lowerCamelCase__ ( UpperCamelCase__ : int , UpperCamelCase__ : Optional[int]=False ) -> Any:
'''simple docstring'''
_snake_case , _snake_case = string.rsplit('.' , 1 )
if reload:
_snake_case = importlib.import_module(UpperCamelCase__ )
importlib.reload(UpperCamelCase__ )
return getattr(importlib.import_module(UpperCamelCase__ , package=UpperCamelCase__ ) , cls )
def lowerCamelCase__ ( UpperCamelCase__ : Union[str, Any] ) -> Tuple:
'''simple docstring'''
if "target" not in config:
raise KeyError('Expected key `target` to instantiate.' )
return get_obj_from_str(config['target'] )(**config.get('params' , {} ) )
def lowerCamelCase__ ( UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int=True , UpperCamelCase__ : Tuple=True ) -> List[str]:
'''simple docstring'''
_snake_case = instantiate_from_config(UpperCamelCase__ )
if sd is not None:
model.load_state_dict(UpperCamelCase__ )
if gpu:
model.cuda()
if eval_mode:
model.eval()
return {"model": model}
def lowerCamelCase__ ( UpperCamelCase__ : Tuple , UpperCamelCase__ : Any , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple ) -> List[Any]:
'''simple docstring'''
if ckpt:
_snake_case = torch.load(UpperCamelCase__ , map_location='cpu' )
_snake_case = pl_sd['global_step']
print(F'''loaded model from global step {global_step}.''' )
else:
_snake_case = {'state_dict': None}
_snake_case = None
_snake_case = load_model_from_config(config.model , pl_sd['state_dict'] , gpu=UpperCamelCase__ , eval_mode=UpperCamelCase__ )['model']
return model, global_step
| 295
|
from cva import destroyAllWindows, imread, imshow, waitKey
def lowerCamelCase__ ( UpperCamelCase__ : Dict ) -> Optional[Any]:
'''simple docstring'''
_snake_case , _snake_case = img.shape[0], img.shape[1]
# converting each pixel's color to its negative
for i in range(UpperCamelCase__ ):
for j in range(UpperCamelCase__ ):
_snake_case = [255, 255, 255] - img[i][j]
return img
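# Worked example: a BGR pixel [10, 20, 30] maps to [245, 235, 225], since every channel
# becomes 255 - value; applying the transform twice therefore restores the original image.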
if __name__ == "__main__":
# read original image
UpperCAmelCase_ = imread("""image_data/lena.jpg""", 1)
# convert to its negative
UpperCAmelCase_ = convert_to_negative(img)
# show result image
imshow("""negative of original image""", img)
waitKey(0)
destroyAllWindows()
| 295
| 1
|
from __future__ import annotations
from fractions import Fraction
def lowerCamelCase__ ( UpperCamelCase__ : int , UpperCamelCase__ : int ) -> bool:
'''simple docstring'''
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
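# Worked example: 49/98 is digit-cancelling because num % 10 == den // 10 (both equal 9)
# and (num // 10) / (den % 10) = 4/8 = 0.5 = 49/98. The four non-trivial two-digit cases
# are 16/64, 19/95, 26/65 and 49/98; their product reduces to 1/100, so solution()
# returns 100.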
def lowerCamelCase__ ( UpperCamelCase__ : int ) -> list[str]:
'''simple docstring'''
_snake_case = []
_snake_case = 11
_snake_case = int('1' + '0' * digit_len )
for num in range(UpperCamelCase__ , UpperCamelCase__ ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(UpperCamelCase__ , UpperCamelCase__ ):
solutions.append(F'''{num}/{den}''' )
den += 1
num += 1
_snake_case = 10
return solutions
def lowerCamelCase__ ( UpperCamelCase__ : int = 2 ) -> int:
'''simple docstring'''
_snake_case = 1.0
for fraction in fraction_list(UpperCamelCase__ ):
_snake_case = Fraction(UpperCamelCase__ )
result *= frac.denominator / frac.numerator
return int(UpperCamelCase__ )
if __name__ == "__main__":
print(solution())
| 295
|
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def lowerCamelCase__ ( UpperCamelCase__ : int ) -> List[str]:
'''simple docstring'''
_snake_case = VideoMAEConfig()
set_architecture_configs(UpperCamelCase__ , UpperCamelCase__ )
if "finetuned" not in model_name:
_snake_case = False
if "finetuned" in model_name:
_snake_case = 'huggingface/label-files'
if "kinetics" in model_name:
_snake_case = 400
_snake_case = 'kinetics400-id2label.json'
elif "ssv2" in model_name:
_snake_case = 174
_snake_case = 'something-something-v2-id2label.json'
else:
raise ValueError('Model name should either contain \'kinetics\' or \'ssv2\' in case it\'s fine-tuned.' )
_snake_case = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type='dataset' ) , 'r' ) )
_snake_case = {int(UpperCamelCase__ ): v for k, v in idalabel.items()}
_snake_case = idalabel
_snake_case = {v: k for k, v in idalabel.items()}
return config
def lowerCamelCase__ ( UpperCamelCase__ : str , UpperCamelCase__ : Dict ) -> int:
'''simple docstring'''
if "small" in model_name:
_snake_case = 384
_snake_case = 1_536
_snake_case = 12
_snake_case = 16
_snake_case = 12
_snake_case = 3
_snake_case = 192
_snake_case = 768
elif "large" in model_name:
_snake_case = 1_024
_snake_case = 4_096
_snake_case = 24
_snake_case = 16
_snake_case = 12
_snake_case = 8
_snake_case = 512
_snake_case = 2_048
elif "huge" in model_name:
_snake_case = 1_280
_snake_case = 5_120
_snake_case = 32
_snake_case = 16
_snake_case = 12
_snake_case = 8
_snake_case = 640
_snake_case = 2_560
elif "base" not in model_name:
raise ValueError('Model name should include either "small", "base", "large", or "huge"' )
def lowerCamelCase__ ( UpperCamelCase__ : Any ) -> Tuple:
'''simple docstring'''
if "encoder." in name:
_snake_case = name.replace('encoder.' , '' )
if "cls_token" in name:
_snake_case = name.replace('cls_token' , 'videomae.embeddings.cls_token' )
if "decoder_pos_embed" in name:
_snake_case = name.replace('decoder_pos_embed' , 'decoder.decoder_pos_embed' )
if "pos_embed" in name and "decoder" not in name:
_snake_case = name.replace('pos_embed' , 'videomae.embeddings.position_embeddings' )
if "patch_embed.proj" in name:
_snake_case = name.replace('patch_embed.proj' , 'videomae.embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
_snake_case = name.replace('patch_embed.norm' , 'videomae.embeddings.norm' )
if "decoder.blocks" in name:
_snake_case = name.replace('decoder.blocks' , 'decoder.decoder_layers' )
if "blocks" in name:
_snake_case = name.replace('blocks' , 'videomae.encoder.layer' )
if "attn.proj" in name:
_snake_case = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name and "bias" not in name:
_snake_case = name.replace('attn' , 'attention.self' )
if "attn" in name:
_snake_case = name.replace('attn' , 'attention.attention' )
if "norm1" in name:
_snake_case = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
_snake_case = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
_snake_case = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
_snake_case = name.replace('mlp.fc2' , 'output.dense' )
if "decoder_embed" in name:
_snake_case = name.replace('decoder_embed' , 'decoder.decoder_embed' )
if "decoder_norm" in name:
_snake_case = name.replace('decoder_norm' , 'decoder.decoder_norm' )
if "decoder_pred" in name:
_snake_case = name.replace('decoder_pred' , 'decoder.decoder_pred' )
if "norm.weight" in name and "decoder" not in name and "fc" not in name:
_snake_case = name.replace('norm.weight' , 'videomae.layernorm.weight' )
if "norm.bias" in name and "decoder" not in name and "fc" not in name:
_snake_case = name.replace('norm.bias' , 'videomae.layernorm.bias' )
if "head" in name and "decoder" not in name:
_snake_case = name.replace('head' , 'classifier' )
return name
def lowerCamelCase__ ( UpperCamelCase__ : Any , UpperCamelCase__ : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
_snake_case = orig_state_dict.pop(UpperCamelCase__ )
if key.startswith('encoder.' ):
_snake_case = key.replace('encoder.' , '' )
if "qkv" in key:
_snake_case = key.split('.' )
if key.startswith('decoder.blocks' ):
_snake_case = config.decoder_hidden_size
_snake_case = int(key_split[2] )
_snake_case = 'decoder.decoder_layers.'
if "weight" in key:
_snake_case = val[:dim, :]
_snake_case = val[dim : dim * 2, :]
_snake_case = val[-dim:, :]
else:
_snake_case = config.hidden_size
_snake_case = int(key_split[1] )
_snake_case = 'videomae.encoder.layer.'
if "weight" in key:
_snake_case = val[:dim, :]
_snake_case = val[dim : dim * 2, :]
_snake_case = val[-dim:, :]
else:
_snake_case = val
return orig_state_dict
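# The slicing above splits each fused qkv weight of shape (3 * dim, dim) into the separate
# query (val[:dim]), key (val[dim : 2 * dim]) and value (val[-dim:]) matrices expected by
# the HF VideoMAE attention layers; all other tensors are renamed but left unreshaped.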
def lowerCamelCase__ ( ) -> Union[str, Any]:
'''simple docstring'''
_snake_case = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' )
_snake_case = np.load(UpperCamelCase__ )
return list(UpperCamelCase__ )
def lowerCamelCase__ ( UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] ) -> List[Any]:
'''simple docstring'''
_snake_case = get_videomae_config(UpperCamelCase__ )
if "finetuned" in model_name:
_snake_case = VideoMAEForVideoClassification(UpperCamelCase__ )
else:
_snake_case = VideoMAEForPreTraining(UpperCamelCase__ )
# download original checkpoint, hosted on Google Drive
_snake_case = 'pytorch_model.bin'
gdown.cached_download(UpperCamelCase__ , UpperCamelCase__ , quiet=UpperCamelCase__ )
_snake_case = torch.load(UpperCamelCase__ , map_location='cpu' )
if "model" in files:
_snake_case = files['model']
else:
_snake_case = files['module']
_snake_case = convert_state_dict(UpperCamelCase__ , UpperCamelCase__ )
model.load_state_dict(UpperCamelCase__ )
model.eval()
# verify model on basic input
_snake_case = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
_snake_case = prepare_video()
_snake_case = image_processor(UpperCamelCase__ , return_tensors='pt' )
if "finetuned" not in model_name:
_snake_case = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos' , filename='bool_masked_pos.pt' )
_snake_case = torch.load(UpperCamelCase__ )
_snake_case = model(**UpperCamelCase__ )
_snake_case = outputs.logits
_snake_case = [
'videomae-small-finetuned-kinetics',
'videomae-small-finetuned-ssv2',
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
'videomae-base-short',
'videomae-base-short-finetuned-kinetics',
'videomae-base',
'videomae-base-finetuned-kinetics',
'videomae-large',
'videomae-large-finetuned-kinetics',
'videomae-huge-finetuned-kinetics',
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
'videomae-base-short-ssv2',
'videomae-base-short-finetuned-ssv2',
'videomae-base-ssv2',
'videomae-base-finetuned-ssv2',
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
_snake_case = torch.Size([1, 400] )
_snake_case = torch.tensor([-0.9291, -0.4061, -0.9307] )
elif model_name == "videomae-small-finetuned-ssv2":
_snake_case = torch.Size([1, 174] )
_snake_case = torch.tensor([0.2671, -0.4689, -0.8235] )
elif model_name == "videomae-base":
_snake_case = torch.Size([1, 1_408, 1_536] )
_snake_case = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]] )
elif model_name == "videomae-base-short":
_snake_case = torch.Size([1, 1_408, 1_536] )
_snake_case = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] )
# we verified the loss both for normalized and unnormalized targets for this one
_snake_case = torch.tensor([0.5142] ) if config.norm_pix_loss else torch.tensor([0.6469] )
elif model_name == "videomae-large":
_snake_case = torch.Size([1, 1_408, 1_536] )
_snake_case = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]] )
elif model_name == "videomae-large-finetuned-kinetics":
_snake_case = torch.Size([1, 400] )
_snake_case = torch.tensor([0.0771, 0.0011, -0.3625] )
elif model_name == "videomae-huge-finetuned-kinetics":
_snake_case = torch.Size([1, 400] )
_snake_case = torch.tensor([0.2433, 0.1632, -0.4894] )
elif model_name == "videomae-base-short-finetuned-kinetics":
_snake_case = torch.Size([1, 400] )
_snake_case = torch.tensor([0.6588, 0.0990, -0.2493] )
elif model_name == "videomae-base-finetuned-kinetics":
_snake_case = torch.Size([1, 400] )
_snake_case = torch.tensor([0.3669, -0.0688, -0.2421] )
elif model_name == "videomae-base-short-ssv2":
_snake_case = torch.Size([1, 1_408, 1_536] )
_snake_case = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
_snake_case = torch.Size([1, 174] )
_snake_case = torch.tensor([-0.0537, -0.1539, -0.3266] )
elif model_name == "videomae-base-ssv2":
_snake_case = torch.Size([1, 1_408, 1_536] )
_snake_case = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]] )
elif model_name == "videomae-base-finetuned-ssv2":
_snake_case = torch.Size([1, 174] )
_snake_case = torch.tensor([0.1961, -0.8337, -0.6389] )
else:
raise ValueError(F'''Model name not supported. Should be one of {model_names}''' )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
assert torch.allclose(logits[0, :3] , UpperCamelCase__ , atol=1e-4 )
else:
print('Logits:' , logits[0, :3, :3] )
assert torch.allclose(logits[0, :3, :3] , UpperCamelCase__ , atol=1e-4 )
print('Logits ok!' )
# verify loss, if applicable
if model_name == "videomae-base-short":
_snake_case = outputs.loss
assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-4 )
print('Loss ok!' )
if pytorch_dump_folder_path is not None:
print(F'''Saving model and image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(UpperCamelCase__ )
model.save_pretrained(UpperCamelCase__ )
if push_to_hub:
print('Pushing to the hub...' )
model.push_to_hub(UpperCamelCase__ , organization='nielsr' )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4""",
type=str,
help=(
"""URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"""
""" download link."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/Users/nielsrogge/Documents/VideoMAE/Test""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--model_name""", default="""videomae-base""", type=str, help="""Name of the model.""")
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
UpperCAmelCase_ = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
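    # Hypothetical invocation (script name and output path are assumptions):
    #
    #   python convert_videomae_to_pytorch.py \
    #       --model_name videomae-base \
    #       --pytorch_dump_folder_path /tmp/videomae-base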
| 295
| 1
|
from manim import *
class UpperCamelCase_ ( _lowerCamelCase ):
def lowerCAmelCase ( self ) -> Tuple:
_snake_case = Rectangle(height=0.5 , width=0.5 )
_snake_case = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_snake_case = [mem.copy() for i in range(6 )]
_snake_case = [mem.copy() for i in range(6 )]
_snake_case = VGroup(*lowerCAmelCase_ ).arrange(lowerCAmelCase_ , buff=0 )
_snake_case = VGroup(*lowerCAmelCase_ ).arrange(lowerCAmelCase_ , buff=0 )
_snake_case = VGroup(lowerCAmelCase_ , lowerCAmelCase_ ).arrange(lowerCAmelCase_ , buff=0 )
_snake_case = Text('CPU' , font_size=24 )
_snake_case = Group(lowerCAmelCase_ , lowerCAmelCase_ ).arrange(lowerCAmelCase_ , buff=0.5 , aligned_edge=lowerCAmelCase_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowerCAmelCase_ )
_snake_case = [mem.copy() for i in range(4 )]
_snake_case = VGroup(*lowerCAmelCase_ ).arrange(lowerCAmelCase_ , buff=0 )
_snake_case = Text('GPU' , font_size=24 )
_snake_case = Group(lowerCAmelCase_ , lowerCAmelCase_ ).arrange(lowerCAmelCase_ , buff=0.5 , aligned_edge=lowerCAmelCase_ )
gpu.move_to([-1, -1, 0] )
self.add(lowerCAmelCase_ )
_snake_case = [mem.copy() for i in range(6 )]
_snake_case = VGroup(*lowerCAmelCase_ ).arrange(lowerCAmelCase_ , buff=0 )
_snake_case = Text('Model' , font_size=24 )
_snake_case = Group(lowerCAmelCase_ , lowerCAmelCase_ ).arrange(lowerCAmelCase_ , buff=0.5 , aligned_edge=lowerCAmelCase_ )
model.move_to([3, -1.0, 0] )
self.add(lowerCAmelCase_ )
_snake_case = []
for i, rect in enumerate(lowerCAmelCase_ ):
rect.set_stroke(lowerCAmelCase_ )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
_snake_case = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowerCAmelCase_ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowerCAmelCase_ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=lowerCAmelCase_ , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=lowerCAmelCase_ , buff=0.0 )
self.add(lowerCAmelCase_ )
cpu_targs.append(lowerCAmelCase_ )
_snake_case = [mem.copy() for i in range(6 )]
_snake_case = VGroup(*lowerCAmelCase_ ).arrange(lowerCAmelCase_ , buff=0 )
_snake_case = Text('Loaded Checkpoint' , font_size=24 )
_snake_case = Group(lowerCAmelCase_ , lowerCAmelCase_ ).arrange(lowerCAmelCase_ , aligned_edge=lowerCAmelCase_ , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
_snake_case = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_snake_case = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = MarkupText(
F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(lowerCAmelCase_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
_snake_case = MarkupText(
F'''Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCAmelCase_ ) , Write(lowerCAmelCase_ ) )
self.play(Write(lowerCAmelCase_ , run_time=1 ) , Create(lowerCAmelCase_ , run_time=1 ) )
_snake_case = []
_snake_case = []
for i, rect in enumerate(lowerCAmelCase_ ):
_snake_case = fill.copy().set_fill(lowerCAmelCase_ , opacity=0.7 )
target.move_to(lowerCAmelCase_ )
first_animations.append(GrowFromCenter(lowerCAmelCase_ , run_time=1 ) )
_snake_case = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(lowerCAmelCase_ , run_time=1.5 ) )
self.play(*lowerCAmelCase_ )
self.play(*lowerCAmelCase_ )
self.wait()
| 295
|
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
UpperCAmelCase_ = logging.get_logger(__name__)
# General docstring
UpperCAmelCase_ = """ResNetConfig"""
# Base docstring
UpperCAmelCase_ = """microsoft/resnet-50"""
UpperCAmelCase_ = [1, 2048, 7, 7]
# Image classification docstring
UpperCAmelCase_ = """microsoft/resnet-50"""
UpperCAmelCase_ = """tiger cat"""
UpperCAmelCase_ = [
"""microsoft/resnet-50""",
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 3 , lowerCAmelCase_ = 1 , lowerCAmelCase_ = "relu" ) -> Union[str, Any]:
super().__init__()
_snake_case = nn.Convad(
lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=lowerCAmelCase_ , stride=lowerCAmelCase_ , padding=kernel_size // 2 , bias=lowerCAmelCase_ )
_snake_case = nn.BatchNormad(lowerCAmelCase_ )
_snake_case = ACTaFN[activation] if activation is not None else nn.Identity()
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Tensor:
_snake_case = self.convolution(lowerCAmelCase_ )
_snake_case = self.normalization(lowerCAmelCase_ )
_snake_case = self.activation(lowerCAmelCase_ )
return hidden_state
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ ) -> Dict:
super().__init__()
_snake_case = ResNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
_snake_case = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 )
_snake_case = config.num_channels
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Tensor:
_snake_case = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
                'Make sure that the channel dimension of the pixel values matches the one set in the configuration.' )
_snake_case = self.embedder(lowerCAmelCase_ )
_snake_case = self.pooler(lowerCAmelCase_ )
return embedding
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 2 ) -> List[Any]:
super().__init__()
_snake_case = nn.Convad(lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=1 , stride=lowerCAmelCase_ , bias=lowerCAmelCase_ )
_snake_case = nn.BatchNormad(lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Tensor:
_snake_case = self.convolution(lowerCAmelCase_ )
_snake_case = self.normalization(lowerCAmelCase_ )
return hidden_state
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 1 , lowerCAmelCase_ = "relu" ) -> Any:
super().__init__()
_snake_case = in_channels != out_channels or stride != 1
_snake_case = (
ResNetShortCut(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ ) if should_apply_shortcut else nn.Identity()
)
_snake_case = nn.Sequential(
ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ ) , ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , activation=lowerCAmelCase_ ) , )
_snake_case = ACTaFN[activation]
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> str:
_snake_case = hidden_state
_snake_case = self.layer(lowerCAmelCase_ )
_snake_case = self.shortcut(lowerCAmelCase_ )
hidden_state += residual
_snake_case = self.activation(lowerCAmelCase_ )
return hidden_state
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 1 , lowerCAmelCase_ = "relu" , lowerCAmelCase_ = 4 ) -> List[str]:
super().__init__()
_snake_case = in_channels != out_channels or stride != 1
_snake_case = out_channels // reduction
_snake_case = (
ResNetShortCut(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ ) if should_apply_shortcut else nn.Identity()
)
_snake_case = nn.Sequential(
ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=1 ) , ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ ) , ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=1 , activation=lowerCAmelCase_ ) , )
_snake_case = ACTaFN[activation]
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Optional[Any]:
_snake_case = hidden_state
_snake_case = self.layer(lowerCAmelCase_ )
_snake_case = self.shortcut(lowerCAmelCase_ )
hidden_state += residual
_snake_case = self.activation(lowerCAmelCase_ )
return hidden_state
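# The bottleneck block above follows the classic ResNet design: a 1x1 convolution reduces
# the width to out_channels // reduction (4x by default), a 3x3 convolution runs at the
# reduced width, and a final 1x1 convolution restores out_channels before the residual
# addition, which is much cheaper than stacking full-width 3x3 layers.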
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 2 , ) -> Tuple:
super().__init__()
_snake_case = ResNetBottleNeckLayer if config.layer_type == 'bottleneck' else ResNetBasicLayer
_snake_case = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ , activation=config.hidden_act ) , *[layer(lowerCAmelCase_ , lowerCAmelCase_ , activation=config.hidden_act ) for _ in range(depth - 1 )] , )
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Tensor:
_snake_case = input
for layer in self.layers:
_snake_case = layer(lowerCAmelCase_ )
return hidden_state
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ ) -> int:
super().__init__()
_snake_case = nn.ModuleList([] )
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
lowerCAmelCase_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
_snake_case = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(lowerCAmelCase_ , config.depths[1:] ):
self.stages.append(ResNetStage(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , depth=lowerCAmelCase_ ) )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = False , lowerCAmelCase_ = True ) -> BaseModelOutputWithNoAttention:
_snake_case = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_snake_case = hidden_states + (hidden_state,)
_snake_case = stage_module(lowerCAmelCase_ )
if output_hidden_states:
_snake_case = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(
last_hidden_state=lowerCAmelCase_ , hidden_states=lowerCAmelCase_ , )
class UpperCamelCase_ ( _lowerCamelCase ):
lowerCAmelCase_ = ResNetConfig
lowerCAmelCase_ = '''resnet'''
lowerCAmelCase_ = '''pixel_values'''
lowerCAmelCase_ = True
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Dict:
if isinstance(lowerCAmelCase_ , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' )
elif isinstance(lowerCAmelCase_ , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=False ) -> List[Any]:
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case = value
UpperCAmelCase_ = R"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
UpperCAmelCase_ = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
'''The bare ResNet model outputting raw features without any specific head on top.''' , _lowerCamelCase , )
class UpperCamelCase_ ( _lowerCamelCase ):
def __init__( self , lowerCAmelCase_ ) -> int:
super().__init__(lowerCAmelCase_ )
_snake_case = config
_snake_case = ResNetEmbeddings(lowerCAmelCase_ )
_snake_case = ResNetEncoder(lowerCAmelCase_ )
_snake_case = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCAmelCase_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCAmelCase_ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None ) -> BaseModelOutputWithPoolingAndNoAttention:
_snake_case = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_snake_case = return_dict if return_dict is not None else self.config.use_return_dict
_snake_case = self.embedder(lowerCAmelCase_ )
_snake_case = self.encoder(
lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , return_dict=lowerCAmelCase_ )
_snake_case = encoder_outputs[0]
_snake_case = self.pooler(lowerCAmelCase_ )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowerCAmelCase_ , pooler_output=lowerCAmelCase_ , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
'''
ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''' , _lowerCamelCase , )
class UpperCamelCase_ ( _lowerCamelCase ):
def __init__( self , lowerCAmelCase_ ) -> Union[str, Any]:
super().__init__(lowerCAmelCase_ )
_snake_case = config.num_labels
_snake_case = ResNetModel(lowerCAmelCase_ )
# classification head
_snake_case = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCAmelCase_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCAmelCase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def lowerCAmelCase ( self , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , ) -> ImageClassifierOutputWithNoAttention:
_snake_case = return_dict if return_dict is not None else self.config.use_return_dict
_snake_case = self.resnet(lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , return_dict=lowerCAmelCase_ )
_snake_case = outputs.pooler_output if return_dict else outputs[1]
_snake_case = self.classifier(lowerCAmelCase_ )
_snake_case = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_snake_case = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_snake_case = 'single_label_classification'
else:
_snake_case = 'multi_label_classification'
if self.config.problem_type == "regression":
_snake_case = MSELoss()
if self.num_labels == 1:
_snake_case = loss_fct(logits.squeeze() , labels.squeeze() )
else:
_snake_case = loss_fct(lowerCAmelCase_ , lowerCAmelCase_ )
elif self.config.problem_type == "single_label_classification":
_snake_case = CrossEntropyLoss()
_snake_case = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_snake_case = BCEWithLogitsLoss()
_snake_case = loss_fct(lowerCAmelCase_ , lowerCAmelCase_ )
if not return_dict:
_snake_case = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=lowerCAmelCase_ , logits=lowerCAmelCase_ , hidden_states=outputs.hidden_states )
@add_start_docstrings(
'''
ResNet backbone, to be used with frameworks like DETR and MaskFormer.
''' , _lowerCamelCase , )
class UpperCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
def __init__( self , lowerCAmelCase_ ) -> List[Any]:
super().__init__(lowerCAmelCase_ )
super()._init_backbone(lowerCAmelCase_ )
_snake_case = [config.embedding_size] + config.hidden_sizes
_snake_case = ResNetEmbeddings(lowerCAmelCase_ )
_snake_case = ResNetEncoder(lowerCAmelCase_ )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCAmelCase_ )
@replace_return_docstrings(output_type=lowerCAmelCase_ , config_class=_CONFIG_FOR_DOC )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None ) -> BackboneOutput:
_snake_case = return_dict if return_dict is not None else self.config.use_return_dict
_snake_case = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_snake_case = self.embedder(lowerCAmelCase_ )
_snake_case = self.encoder(lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , return_dict=lowerCAmelCase_ )
_snake_case = outputs.hidden_states
_snake_case = ()
for idx, stage in enumerate(self.stage_names ):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
_snake_case = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=lowerCAmelCase_ , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=lowerCAmelCase_ , )
| 295
| 1
|
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
UpperCAmelCase_ = logging.get_logger(__name__)
def lowerCamelCase__ ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : Any=None ) -> Optional[Any]:
'''simple docstring'''
if "." in tensor_name:
_snake_case = tensor_name.split('.' )
for split in splits[:-1]:
_snake_case = getattr(UpperCamelCase__ , UpperCamelCase__ )
if new_module is None:
raise ValueError(F'''{module} has no attribute {split}.''' )
_snake_case = new_module
_snake_case = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(F'''{module} does not have a parameter or a buffer named {tensor_name}.''' )
_snake_case = tensor_name in module._buffers
_snake_case = getattr(UpperCamelCase__ , UpperCamelCase__ )
if old_value.device == torch.device('meta' ) and device not in ["meta", torch.device('meta' )] and value is None:
        raise ValueError(F'''{tensor_name} is on the meta device, we need a `value` to put on {device}.''' )
_snake_case = False
_snake_case = False
if is_buffer or not is_bitsandbytes_available():
_snake_case = False
_snake_case = False
else:
_snake_case = hasattr(bnb.nn , 'Params4bit' ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit )
_snake_case = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams )
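    # The two flags above distinguish bitsandbytes 4-bit (Params4bit) from 8-bit
    # (Int8Params) parameters; either flavor is routed through the quantized branch below.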
if is_abit or is_abit:
_snake_case = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
_snake_case = old_value.to(UpperCamelCase__ )
elif isinstance(UpperCamelCase__ , torch.Tensor ):
_snake_case = value.to('cpu' )
if value.dtype == torch.inta:
_snake_case = version.parse(importlib.metadata.version('bitsandbytes' ) ) > version.parse(
'0.37.2' )
if not is_abit_serializable:
raise ValueError(
'Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '
'Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.' )
else:
_snake_case = torch.tensor(UpperCamelCase__ , device='cpu' )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls , UpperCamelCase__ ) and fpaa_statistics is None:
_snake_case = new_value.T
_snake_case = old_value.__dict__
if is_abit:
_snake_case = bnb.nn.IntaParams(UpperCamelCase__ , requires_grad=UpperCamelCase__ , **UpperCamelCase__ ).to(UpperCamelCase__ )
elif is_abit:
_snake_case = bnb.nn.Paramsabit(UpperCamelCase__ , requires_grad=UpperCamelCase__ , **UpperCamelCase__ ).to(UpperCamelCase__ )
_snake_case = new_value
if fpaa_statistics is not None:
setattr(module.weight , 'SCB' , fpaa_statistics.to(UpperCamelCase__ ) )
else:
if value is None:
_snake_case = old_value.to(UpperCamelCase__ )
elif isinstance(UpperCamelCase__ , torch.Tensor ):
_snake_case = value.to(UpperCamelCase__ )
else:
_snake_case = torch.tensor(UpperCamelCase__ , device=UpperCamelCase__ )
if is_buffer:
_snake_case = new_value
else:
_snake_case = nn.Parameter(UpperCamelCase__ , requires_grad=old_value.requires_grad )
_snake_case = new_value
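# A minimal, self-contained sketch of the dotted-name traversal at the top of
# the function above: walk a name such as "transformer.h.0.attn.weight" down to
# the submodule that owns the tensor. `get_owner` is a hypothetical helper for
# illustration only, not part of this module.
def get_owner(module, dotted_name):
    *parent_parts, leaf = dotted_name.split('.')
    for part in parent_parts:
        module = getattr(module, part)  # descend one submodule per path part
    return module, leaf
# e.g. get_owner(model, 'encoder.0.weight') returns (the module at encoder.0, 'weight')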
def lowerCamelCase__ ( UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : int=None , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : List[Any]=False ) -> Optional[Any]:
'''simple docstring'''
for name, module in model.named_children():
if current_key_name is None:
_snake_case = []
current_key_name.append(UpperCamelCase__ )
if (isinstance(UpperCamelCase__ , nn.Linear ) or isinstance(UpperCamelCase__ , UpperCamelCase__ )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in '.'.join(UpperCamelCase__ ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
_snake_case , _snake_case = module.weight.shape
else:
_snake_case = module.in_features
_snake_case = module.out_features
if quantization_config.quantization_method() == "llm_int8":
_snake_case = bnb.nn.LinearabitLt(
UpperCamelCase__ , UpperCamelCase__ , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , )
_snake_case = True
else:
if (
quantization_config.llm_inta_skip_modules is not None
and name in quantization_config.llm_inta_skip_modules
):
pass
else:
_snake_case = bnb.nn.Linearabit(
UpperCamelCase__ , UpperCamelCase__ , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , )
_snake_case = True
# Store the module class in case we need to transpose the weight later
_snake_case = type(UpperCamelCase__ )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(UpperCamelCase__ )
if len(list(module.children() ) ) > 0:
_snake_case , _snake_case = _replace_with_bnb_linear(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , has_been_replaced=UpperCamelCase__ , )
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def lowerCamelCase__ ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : str=None , UpperCamelCase__ : Tuple=None ) -> List[Any]:
'''simple docstring'''
_snake_case = ['lm_head'] if modules_to_not_convert is None else modules_to_not_convert
_snake_case , _snake_case = _replace_with_bnb_linear(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if not has_been_replaced:
logger.warning(
'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
' Please double check your model architecture, or submit an issue on github if you think this is'
' a bug.' )
return model
def lowerCamelCase__ ( *UpperCamelCase__ : str , **UpperCamelCase__ : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
warnings.warn(
'`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead' , UpperCamelCase__ , )
return replace_with_bnb_linear(*UpperCamelCase__ , **UpperCamelCase__ )
def lowerCamelCase__ ( *UpperCamelCase__ : str , **UpperCamelCase__ : Any ) -> Optional[int]:
'''simple docstring'''
warnings.warn(
'`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead' , UpperCamelCase__ , )
return set_module_quantized_tensor_to_device(*UpperCamelCase__ , **UpperCamelCase__ )
def lowerCamelCase__ ( UpperCamelCase__ : Optional[Any] ) -> Tuple:
'''simple docstring'''
_snake_case = deepcopy(UpperCamelCase__ ) # this has 0 cost since it is done inside the `init_empty_weights` context manager
tied_model.tie_weights()
_snake_case = find_tied_parameters(UpperCamelCase__ )
# For compatibility with Accelerate < 0.18
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
_snake_case = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
_snake_case = sum(UpperCamelCase__ , [] )
_snake_case = len(UpperCamelCase__ ) > 0
# Check if it is a base model
_snake_case = not hasattr(UpperCamelCase__ , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
_snake_case = list(model.named_children() )
_snake_case = [list_modules[-1][0]]
# add last module together with tied weights
_snake_case = set(UpperCamelCase__ ) - set(UpperCamelCase__ )
_snake_case = list(set(UpperCamelCase__ ) ) + list(UpperCamelCase__ )
# remove ".weight" from the keys
_snake_case = ['.weight', '.bias']
_snake_case = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
_snake_case = name.replace(UpperCamelCase__ , '' )
filtered_module_names.append(UpperCamelCase__ )
return filtered_module_names
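# A self-contained sketch of the recursive module-replacement pattern used by
# `_replace_with_bnb_linear` above. `StubQuantLinear` is a hypothetical
# stand-in for the bitsandbytes layers and only marks that a swap happened.
import torch.nn as nn

class StubQuantLinear(nn.Linear):
    """Placeholder for a quantized linear layer (illustration only)."""

def replace_linear(model, skip=()):
    for name, child in model.named_children():
        if isinstance(child, nn.Linear) and name not in skip:
            model._modules[name] = StubQuantLinear(
                child.in_features, child.out_features, bias=child.bias is not None)
        else:
            replace_linear(child, skip)  # recurse into nested submodules
    return model
# replace_linear(model, skip=('lm_head',)) swaps every nn.Linear except the
# LM head, mirroring the default `modules_to_not_convert` above.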
| 295
|
def lowerCamelCase__ ( UpperCamelCase__ : list[list[int]] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : list[int] ) -> bool:
'''simple docstring'''
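# 1. Validate that current and next vertices are connected by an edge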
if graph[path[curr_ind - 1]][next_ver] == 0:
return False
# 2. Validate that next vertex is not already in path
return not any(vertex == next_ver for vertex in path )
def lowerCamelCase__ ( UpperCamelCase__ : list[list[int]] , UpperCamelCase__ : list[int] , UpperCamelCase__ : int ) -> bool:
'''simple docstring'''
if curr_ind == len(UpperCamelCase__ ):
# return whether path exists between current and starting vertices
return graph[path[curr_ind - 1]][path[0]] == 1
# Recursive Step
for next_ver in range(0 , len(UpperCamelCase__ ) ):
if valid_connection(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
# Insert current vertex into path as next transition
_snake_case = next_ver
# Validate created path
if util_hamilton_cycle(UpperCamelCase__ , UpperCamelCase__ , curr_ind + 1 ):
return True
# Backtrack
_snake_case = -1
return False
def lowerCamelCase__ ( UpperCamelCase__ : list[list[int]] , UpperCamelCase__ : int = 0 ) -> list[int]:
'''simple docstring'''
_snake_case = [-1] * (len(UpperCamelCase__ ) + 1)
# initialize start and end of path with starting index
_snake_case = _snake_case = start_index
# evaluate; return the path if a Hamiltonian cycle exists, otherwise an empty list
return path if util_hamilton_cycle(UpperCamelCase__ , UpperCamelCase__ , 1 ) else []
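# A self-contained sketch of the same backtracking search on a concrete
# 5-vertex graph; the names here are illustrative, not the mangled ones above.
def hamilton_cycle_demo():
    graph = [
        [0, 1, 0, 1, 0],
        [1, 0, 1, 1, 1],
        [0, 1, 0, 0, 1],
        [1, 1, 0, 0, 1],
        [0, 1, 1, 1, 0],
    ]
    n = len(graph)
    path = [-1] * (n + 1)
    path[0] = path[-1] = 0  # the cycle starts and ends at vertex 0
    def backtrack(pos):
        if pos == n:  # every vertex placed; check the edge closing the cycle
            return graph[path[pos - 1]][path[0]] == 1
        for v in range(n):
            if graph[path[pos - 1]][v] == 1 and v not in path[:pos]:
                path[pos] = v
                if backtrack(pos + 1):
                    return True
                path[pos] = -1  # undo and try the next candidate
        return False
    return path if backtrack(1) else []
# hamilton_cycle_demo() returns [0, 1, 2, 4, 3, 0]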
| 295
| 1
|
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class UpperCamelCase_ ( enum.Enum ):
lowerCAmelCase_ = 0
lowerCAmelCase_ = 1
lowerCAmelCase_ = 2
@add_end_docstrings(_lowerCamelCase )
class UpperCamelCase_ ( _lowerCamelCase ):
lowerCAmelCase_ = '''
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
'''
def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> Any:
super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == 'tf' else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
_snake_case = None
if self.model.config.prefix is not None:
_snake_case = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
_snake_case = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
_snake_case , _snake_case , _snake_case = self._sanitize_parameters(prefix=lowerCAmelCase_ , **self._forward_params )
_snake_case = {**self._preprocess_params, **preprocess_params}
_snake_case = {**self._forward_params, **forward_params}
def lowerCAmelCase ( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ , ) -> Tuple:
_snake_case = {}
if prefix is not None:
_snake_case = prefix
if prefix:
_snake_case = self.tokenizer(
lowerCAmelCase_ , padding=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_tensors=self.framework )
_snake_case = prefix_inputs['input_ids'].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
F'''{handle_long_generation} is not a valid value for the `handle_long_generation` parameter;'''
' expected one of [None, \'hole\']' )
_snake_case = handle_long_generation
preprocess_params.update(lowerCAmelCase_ )
_snake_case = generate_kwargs
_snake_case = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_full_text`' )
if return_tensors is not None:
raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`' )
_snake_case = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_tensors`' )
_snake_case = ReturnType.TENSORS
if return_type is not None:
_snake_case = return_type
if clean_up_tokenization_spaces is not None:
_snake_case = clean_up_tokenization_spaces
if stop_sequence is not None:
_snake_case = self.tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
if len(lowerCAmelCase_ ) > 1:
warnings.warn(
'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
' the stop sequence will be used as the stop sequence string in the interim.' )
_snake_case = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def lowerCAmelCase ( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> List[str]:
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({'add_space_before_punct_symbol': True} )
return super()._parse_and_tokenize(*lowerCAmelCase_ , **lowerCAmelCase_ )
def __call__( self , lowerCAmelCase_ , **lowerCAmelCase_ ) -> List[Any]:
return super().__call__(lowerCAmelCase_ , **lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_="" , lowerCAmelCase_=None , **lowerCAmelCase_ ) -> Any:
_snake_case = self.tokenizer(
prefix + prompt_text , padding=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_tensors=self.framework )
_snake_case = prompt_text
if handle_long_generation == "hole":
_snake_case = inputs['input_ids'].shape[-1]
if "max_new_tokens" in generate_kwargs:
_snake_case = generate_kwargs['max_new_tokens']
else:
_snake_case = generate_kwargs.get('max_length' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('We cannot infer how many new tokens are expected' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
_snake_case = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
'We cannot use `hole` to handle this generation: the number of desired tokens exceeds the'
' model\'s max length' )
_snake_case = inputs['input_ids'][:, -keep_length:]
if "attention_mask" in inputs:
_snake_case = inputs['attention_mask'][:, -keep_length:]
return inputs
def lowerCAmelCase ( self , lowerCAmelCase_ , **lowerCAmelCase_ ) -> Optional[Any]:
_snake_case = model_inputs['input_ids']
_snake_case = model_inputs.get('attention_mask' , lowerCAmelCase_ )
# Allow empty prompts
if input_ids.shape[1] == 0:
_snake_case = None
_snake_case = None
_snake_case = 1
else:
_snake_case = input_ids.shape[0]
_snake_case = model_inputs.pop('prompt_text' )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
_snake_case = generate_kwargs.pop('prefix_length' , 0 )
if prefix_length > 0:
_snake_case = 'max_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].max_new_tokens is not None
)
if not has_max_new_tokens:
_snake_case = generate_kwargs.get('max_length' ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
_snake_case = 'min_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
_snake_case = self.model.generate(input_ids=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , **lowerCAmelCase_ )
_snake_case = generated_sequence.shape[0]
if self.framework == "pt":
_snake_case = generated_sequence.reshape(lowerCAmelCase_ , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
_snake_case = tf.reshape(lowerCAmelCase_ , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=ReturnType.FULL_TEXT , lowerCAmelCase_=True ) -> int:
_snake_case = model_outputs['generated_sequence'][0]
_snake_case = model_outputs['input_ids']
_snake_case = model_outputs['prompt_text']
_snake_case = generated_sequence.numpy().tolist()
_snake_case = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
_snake_case = {'generated_token_ids': sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
_snake_case = self.tokenizer.decode(
lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
_snake_case = 0
else:
_snake_case = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ , ) )
if return_type == ReturnType.FULL_TEXT:
_snake_case = prompt_text + text[prompt_length:]
else:
_snake_case = text[prompt_length:]
_snake_case = {'generated_text': all_text}
records.append(lowerCAmelCase_ )
return records
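# A hypothetical end-to-end use of the pipeline above; 'gpt2' is only an
# example checkpoint, not one referenced in this file.
if __name__ == '__main__':
    from transformers import pipeline
    generator = pipeline('text-generation', model='gpt2')
    outputs = generator('Once upon a time', max_new_tokens=20, return_full_text=False)
    print(outputs[0]['generated_text'])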
| 295
|
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def lowerCamelCase__ ( UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict ) -> List[Any]:
'''simple docstring'''
_snake_case = OmegaConf.load(UpperCamelCase__ )
_snake_case = torch.load(UpperCamelCase__ , map_location='cpu' )['model']
_snake_case = list(state_dict.keys() )
# extract state_dict for VQVAE
_snake_case = {}
_snake_case = 'first_stage_model.'
for key in keys:
if key.startswith(UpperCamelCase__ ):
_snake_case = state_dict[key]
# extract state_dict for UNetLDM
_snake_case = {}
_snake_case = 'model.diffusion_model.'
for key in keys:
if key.startswith(UpperCamelCase__ ):
_snake_case = state_dict[key]
_snake_case = config.model.params.first_stage_config.params
_snake_case = config.model.params.unet_config.params
_snake_case = VQModel(**UpperCamelCase__ ).eval()
vqvae.load_state_dict(UpperCamelCase__ )
_snake_case = UNetLDMModel(**UpperCamelCase__ ).eval()
unet.load_state_dict(UpperCamelCase__ )
_snake_case = DDIMScheduler(
timesteps=config.model.params.timesteps , beta_schedule='scaled_linear' , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=UpperCamelCase__ , )
_snake_case = LDMPipeline(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
pipeline.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", type=str, required=True)
parser.add_argument("""--config_path""", type=str, required=True)
parser.add_argument("""--output_path""", type=str, required=True)
UpperCAmelCase_ = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
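# Example invocation (the script filename and all three paths are placeholders
# for a real LDM checkpoint, its OmegaConf config, and an output directory):
#   python convert_ldm_original_checkpoint.py \
#       --checkpoint_path model.ckpt --config_path config.yaml --output_path ./ldm-pipeline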
| 295
| 1
|
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class UpperCamelCase_ ( _lowerCamelCase ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Union[str, Any]:
_snake_case = params
_snake_case = np.array(lowerCAmelCase_ )
_snake_case = np.array([len(lowerCAmelCase_ ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self , lowerCAmelCase_ ) -> Tuple:
return (self.token_ids[index], self.lengths[index])
def __len__( self ) -> Optional[Any]:
return len(self.lengths )
def lowerCAmelCase ( self ) -> Tuple:
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def lowerCAmelCase ( self ) -> Tuple:
_snake_case = self.params.max_model_input_size
_snake_case = self.lengths > max_len
logger.info(F'''Splitting {sum(lowerCAmelCase_ )} too long sequences.''' )
def divide_chunks(lowerCAmelCase_ , lowerCAmelCase_ ):
return [l[i : i + n] for i in range(0 , len(lowerCAmelCase_ ) , lowerCAmelCase_ )]
_snake_case = []
_snake_case = []
if self.params.mlm:
_snake_case , _snake_case = self.params.special_tok_ids['cls_token'], self.params.special_tok_ids['sep_token']
else:
_snake_case , _snake_case = self.params.special_tok_ids['bos_token'], self.params.special_tok_ids['eos_token']
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
_snake_case = []
for sub_s in divide_chunks(seq_ , max_len - 2 ):
if sub_s[0] != cls_id:
_snake_case = np.insert(lowerCAmelCase_ , 0 , lowerCAmelCase_ )
if sub_s[-1] != sep_id:
_snake_case = np.insert(lowerCAmelCase_ , len(lowerCAmelCase_ ) , lowerCAmelCase_ )
assert len(lowerCAmelCase_ ) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(lowerCAmelCase_ )
new_tok_ids.extend(lowerCAmelCase_ )
new_lengths.extend([len(lowerCAmelCase_ ) for l in sub_seqs] )
_snake_case = np.array(lowerCAmelCase_ )
_snake_case = np.array(lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> Any:
_snake_case = len(self )
_snake_case = self.lengths > 11
_snake_case = self.token_ids[indices]
_snake_case = self.lengths[indices]
_snake_case = len(self )
logger.info(F'''Remove {init_size - new_size} too short (<=11 tokens) sequences.''' )
def lowerCAmelCase ( self ) -> Optional[Any]:
if "unk_token" not in self.params.special_tok_ids:
return
else:
_snake_case = self.params.special_tok_ids['unk_token']
_snake_case = len(self )
_snake_case = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
_snake_case = (unk_occs / self.lengths) < 0.5
_snake_case = self.token_ids[indices]
_snake_case = self.lengths[indices]
_snake_case = len(self )
logger.info(F'''Remove {init_size - new_size} sequences with a high level of unknown tokens (>=50%).''' )
def lowerCAmelCase ( self ) -> List[Any]:
if not self.params.is_master:
return
logger.info(F'''{len(self )} sequences''' )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Tuple:
_snake_case = [t[0] for t in batch]
_snake_case = [t[1] for t in batch]
assert len(lowerCAmelCase_ ) == len(lowerCAmelCase_ )
# Max for paddings
_snake_case = max(lowerCAmelCase_ )
# Pad token ids
if self.params.mlm:
_snake_case = self.params.special_tok_ids['pad_token']
else:
_snake_case = self.params.special_tok_ids['unk_token']
_snake_case = [list(t.astype(lowerCAmelCase_ ) ) + [pad_idx] * (max_seq_len_ - len(lowerCAmelCase_ )) for t in token_ids]
assert len(tk_ ) == len(lowerCAmelCase_ )
assert all(len(lowerCAmelCase_ ) == max_seq_len_ for t in tk_ )
_snake_case = torch.tensor(tk_ ) # (bs, max_seq_len_)
_snake_case = torch.tensor(lowerCAmelCase_ ) # (bs)
return tk_t, lg_t
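# A self-contained sketch of the padding step in the collate method above
# (its original name, `batch_sequences`, is mangled here): pad variable-length
# token-id lists to the batch maximum, then stack them into tensors.
def pad_batch(sequences, pad_idx=0):
    max_len = max(len(s) for s in sequences)
    padded = [list(s) + [pad_idx] * (max_len - len(s)) for s in sequences]
    return torch.tensor(padded), torch.tensor([len(s) for s in sequences])
# pad_batch([[5, 6, 7], [8, 9]]) -> (tensor([[5, 6, 7], [8, 9, 0]]), tensor([3, 2]))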
| 295
|
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UpperCamelCase_ :
@property
def lowerCAmelCase ( self ) -> int:
return self.get_dummy_input()
@property
def lowerCAmelCase ( self ) -> Optional[Any]:
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(F'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' )
def lowerCAmelCase ( self , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=False , ) -> List[str]:
_snake_case = 4
_snake_case = 32
_snake_case = (32, 32)
_snake_case = torch.manual_seed(0 )
_snake_case = torch.device(lowerCAmelCase_ )
_snake_case = (batch_size, num_channels) + sizes
_snake_case = randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=lowerCAmelCase_ )
_snake_case = {'hidden_states': hidden_states}
if include_temb:
_snake_case = 128
_snake_case = randn_tensor((batch_size, temb_channels) , generator=lowerCAmelCase_ , device=lowerCAmelCase_ )
if include_res_hidden_states_tuple:
_snake_case = torch.manual_seed(1 )
_snake_case = (randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=lowerCAmelCase_ ),)
if include_encoder_hidden_states:
_snake_case = floats_tensor((batch_size, 32, 32) ).to(lowerCAmelCase_ )
if include_skip_sample:
_snake_case = randn_tensor(((batch_size, 3) + sizes) , generator=lowerCAmelCase_ , device=lowerCAmelCase_ )
return dummy_input
def lowerCAmelCase ( self ) -> Tuple:
_snake_case = {
'in_channels': 32,
'out_channels': 32,
'temb_channels': 128,
}
if self.block_type == "up":
_snake_case = 32
if self.block_type == "mid":
init_dict.pop('out_channels' )
_snake_case = self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Optional[int]:
_snake_case , _snake_case = self.prepare_init_args_and_inputs_for_common()
_snake_case = self.block_class(**lowerCAmelCase_ )
unet_block.to(lowerCAmelCase_ )
unet_block.eval()
with torch.no_grad():
_snake_case = unet_block(**lowerCAmelCase_ )
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case = output[0]
self.assertEqual(output.shape , self.output_shape )
_snake_case = output[0, -1, -3:, -3:]
_snake_case = torch.tensor(lowerCAmelCase_ ).to(lowerCAmelCase_ )
assert torch_all_close(output_slice.flatten() , lowerCAmelCase_ , atol=5E-3 )
@unittest.skipIf(torch_device == 'mps' , 'Training is not supported in mps' )
def lowerCAmelCase ( self ) -> Tuple:
_snake_case , _snake_case = self.prepare_init_args_and_inputs_for_common()
_snake_case = self.block_class(**lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.train()
_snake_case = model(**lowerCAmelCase_ )
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case = output[0]
_snake_case = torch.device(lowerCAmelCase_ )
_snake_case = randn_tensor(output.shape , device=lowerCAmelCase_ )
_snake_case = torch.nn.functional.mse_loss(lowerCAmelCase_ , lowerCAmelCase_ )
loss.backward()
| 295
| 1
|
from __future__ import annotations
class UpperCamelCase_ :
def __init__( self , lowerCAmelCase_ ) -> None:
_snake_case = order
# a_{0} ... a_{k}
_snake_case = [1.0] + [0.0] * order
# b_{0} ... b_{k}
_snake_case = [1.0] + [0.0] * order
# x[n-1] ... x[n-k]
_snake_case = [0.0] * self.order
# y[n-1] ... y[n-k]
_snake_case = [0.0] * self.order
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> None:
if len(lowerCAmelCase_ ) < self.order:
_snake_case = [1.0, *a_coeffs]
if len(lowerCAmelCase_ ) != self.order + 1:
_snake_case = (
F'''Expected a_coeffs to have {self.order + 1} elements '''
F'''for {self.order}-order filter, got {len(lowerCAmelCase_ )}'''
)
raise ValueError(lowerCAmelCase_ )
if len(lowerCAmelCase_ ) != self.order + 1:
_snake_case = (
F'''Expected b_coeffs to have {self.order + 1} elements '''
F'''for {self.order}-order filter, got {len(lowerCAmelCase_ )}'''
)
raise ValueError(lowerCAmelCase_ )
_snake_case = a_coeffs
_snake_case = b_coeffs
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> float:
_snake_case = 0.0
# Start at index 1 and do index 0 at the end.
for i in range(1 , self.order + 1 ):
result += (
self.b_coeffs[i] * self.input_history[i - 1]
- self.a_coeffs[i] * self.output_history[i - 1]
)
_snake_case = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
_snake_case = self.input_history[:-1]
_snake_case = self.output_history[:-1]
_snake_case = sample
_snake_case = result
return result
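# A self-contained sketch of the same difference equation configured as a
# first-order low-pass (an exponential moving average with smoothing factor
# alpha): y[n] = (b0 * x[n] + b1 * x[n-1] - a1 * y[n-1]) / a0.
def first_order_lowpass(samples, alpha=0.2):
    b0, b1, a0, a1 = alpha, 0.0, 1.0, -(1.0 - alpha)
    x_prev = y_prev = 0.0
    out = []
    for x in samples:
        y = (b0 * x + b1 * x_prev - a1 * y_prev) / a0
        x_prev, y_prev = x, y
        out.append(round(y, 3))
    return out
# first_order_lowpass([1.0] * 5) -> [0.2, 0.36, 0.488, 0.59, 0.672]
# (the step response climbs toward 1.0, as expected of a low-pass filter)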
| 295
|
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def lowerCamelCase__ ( ) -> List[str]:
'''simple docstring'''
_snake_case , _snake_case = 9, 14 # noqa: F841
_snake_case = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
_snake_case = defaultdict(UpperCamelCase__ )
for nodea, nodea, cost in edges:
adjacency[nodea].append([nodea, cost] )
adjacency[nodea].append([nodea, cost] )
_snake_case = mst(UpperCamelCase__ )
_snake_case = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
_snake_case = tuple(answer[:2] )
_snake_case = tuple(edge[::-1] )
assert edge in result or reverse in result
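# A minimal Prim's-algorithm sketch in the same adjacency-list format the test
# above builds (node -> [[neighbor, cost], ...]); this is an illustration, not
# the implementation imported as `prisms_algorithm`.
import heapq

def prim_mst(adjacency, start=0):
    visited = {start}
    heap = [(cost, start, v) for v, cost in adjacency[start]]
    heapq.heapify(heap)
    mst_edges = []
    while heap:
        cost, u, v = heapq.heappop(heap)
        if v in visited:
            continue  # skip edges into already-connected vertices
        visited.add(v)
        mst_edges.append((u, v, cost))
        for w, c in adjacency[v]:
            if w not in visited:
                heapq.heappush(heap, (c, v, w))
    return mst_edges
# prim_mst({0: [[1, 4], [2, 1]], 1: [[0, 4], [2, 2]], 2: [[0, 1], [1, 2]]})
# -> [(0, 2, 1), (2, 1, 2)]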
| 295
| 1
|
from collections.abc import Iterable
from typing import Any
class UpperCamelCase_ :
def __init__( self , lowerCAmelCase_ = None ) -> str:
_snake_case = value
_snake_case = None # Added in order to delete a node easier
_snake_case = None
_snake_case = None
def __repr__( self ) -> str:
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({F'''{self.value}''': (self.left, self.right)} , indent=1 )
class UpperCamelCase_ :
def __init__( self , lowerCAmelCase_ = None ) -> Dict:
_snake_case = root
def __str__( self ) -> str:
return str(self.root )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> None:
if new_children is not None: # reset its kids
_snake_case = node.parent
if node.parent is not None: # reset its parent
if self.is_right(lowerCAmelCase_ ): # If it is the right children
_snake_case = new_children
else:
_snake_case = new_children
else:
_snake_case = new_children
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> bool:
if node.parent and node.parent.right:
return node == node.parent.right
return False
def lowerCAmelCase ( self ) -> bool:
return self.root is None
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> None:
_snake_case = Node(lowerCAmelCase_ ) # create a new Node
if self.empty(): # if Tree is empty
_snake_case = new_node # set its root
else: # Tree is not empty
_snake_case = self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
_snake_case = new_node # We insert the new node in a leaf
break
else:
_snake_case = parent_node.left
else:
if parent_node.right is None:
_snake_case = new_node
break
else:
_snake_case = parent_node.right
_snake_case = parent_node
def lowerCAmelCase ( self , *lowerCAmelCase_ ) -> None:
for value in values:
self.__insert(lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Node | None:
if self.empty():
raise IndexError('Warning: Tree is empty! Please insert values before searching.' )
else:
_snake_case = self.root
# use lazy evaluation here to avoid NoneType Attribute error
while node is not None and node.value is not value:
_snake_case = node.left if value < node.value else node.right
return node
def lowerCAmelCase ( self , lowerCAmelCase_ = None ) -> Node | None:
if node is None:
if self.root is None:
return None
_snake_case = self.root
if not self.empty():
while node.right is not None:
_snake_case = node.right
return node
def lowerCAmelCase ( self , lowerCAmelCase_ = None ) -> Node | None:
if node is None:
_snake_case = self.root
if self.root is None:
return None
if not self.empty():
_snake_case = self.root
while node.left is not None:
_snake_case = node.left
return node
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> None:
_snake_case = self.search(lowerCAmelCase_ ) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(lowerCAmelCase_ , lowerCAmelCase_ )
elif node.left is None: # Has only right children
self.__reassign_nodes(lowerCAmelCase_ , node.right )
elif node.right is None: # Has only left children
self.__reassign_nodes(lowerCAmelCase_ , node.left )
else:
_snake_case = self.get_max(
node.left ) # Gets the max value of the left branch
self.remove(tmp_node.value ) # type: ignore
_snake_case = (
tmp_node.value # type: ignore
) # Assigns the value to the node to delete and keep tree structure
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Iterable:
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left )
yield from self.preorder_traverse(node.right )
def lowerCAmelCase ( self , lowerCAmelCase_=None ) -> Any:
if traversal_function is None:
return self.preorder_traverse(self.root )
else:
return traversal_function(self.root )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> None:
if node:
self.inorder(lowerCAmelCase_ , node.left )
arr.append(node.value )
self.inorder(lowerCAmelCase_ , node.right )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> int:
_snake_case = []
self.inorder(lowerCAmelCase_ , lowerCAmelCase_ ) # append all values to list using inorder traversal
return arr[k - 1]
def lowerCamelCase__ ( UpperCamelCase__ : Node | None ) -> list[Node]:
'''simple docstring'''
_snake_case = []
if curr_node is not None:
_snake_case = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
return node_list
def lowerCamelCase__ ( ) -> None:
'''simple docstring'''
_snake_case = (8, 3, 6, 1, 10, 14, 13, 4, 7)
_snake_case = BinarySearchTree()
for i in testlist:
t.insert(UpperCamelCase__ )
# Prints all the elements of the list in order traversal
print(UpperCamelCase__ )
if t.search(6 ) is not None:
print('The value 6 exists' )
else:
print('The value 6 doesn\'t exist' )
if t.search(-1 ) is not None:
print('The value -1 exists' )
else:
print('The value -1 doesn\'t exist' )
if not t.empty():
print('Max Value: ' , t.get_max().value ) # type: ignore
print('Min Value: ' , t.get_min().value ) # type: ignore
for i in testlist:
t.remove(UpperCamelCase__ )
print(UpperCamelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
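# Example of the k-th-smallest helper defined above: an inorder traversal of a
# BST visits values in sorted order, so the k-th visited value is the answer.
# `find_kth_smallest` is the assumed original name of the mangled method that
# collects an inorder traversal and returns arr[k - 1].
#   t = BinarySearchTree()
#   t.insert(8, 3, 6, 1, 10)
#   t.find_kth_smallest(2, t.root)  # -> 3, since sorted order is 1, 3, 6, 8, 10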
| 295
|
from collections.abc import Sequence
def lowerCamelCase__ ( UpperCamelCase__ : Sequence[float] , UpperCamelCase__ : bool = False ) -> float:
'''simple docstring'''
if not arr:
return 0
_snake_case = 0 if allow_empty_subarrays else float('-inf' )
_snake_case = 0.0
for num in arr:
_snake_case = max(0 if allow_empty_subarrays else num , curr_sum + num )
_snake_case = max(UpperCamelCase__ , UpperCamelCase__ )
return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
UpperCAmelCase_ = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(F"{max_subarray_sum(nums) = }")
| 295
| 1
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase_ = """▁"""
UpperCAmelCase_ = {"""vocab_file""": """spiece.model"""}
UpperCAmelCase_ = {
"""vocab_file""": {"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"""}
}
UpperCAmelCase_ = {
"""google/pegasus-xsum""": 512,
}
UpperCAmelCase_ = logging.get_logger(__name__)
class UpperCamelCase_ ( _lowerCamelCase ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ['''input_ids''', '''attention_mask''']
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_="<pad>" , lowerCAmelCase_="</s>" , lowerCAmelCase_="<unk>" , lowerCAmelCase_="<mask_2>" , lowerCAmelCase_="<mask_1>" , lowerCAmelCase_=None , lowerCAmelCase_=103 , lowerCAmelCase_ = None , **lowerCAmelCase_ , ) -> None:
_snake_case = offset
if additional_special_tokens is not None:
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
raise TypeError(
F'''additional_special_tokens should be of type {type(lowerCAmelCase_ )}, but is'''
F''' {type(lowerCAmelCase_ )}''' )
_snake_case = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
F'''<unk_{i}>''' for i in range(len(lowerCAmelCase_ ) , self.offset - 1 )
]
if len(set(lowerCAmelCase_ ) ) != len(lowerCAmelCase_ ):
raise ValueError(
'Please make sure that the provided additional_special_tokens do not contain an incorrectly'
F''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
_snake_case = additional_special_tokens_extended
else:
_snake_case = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [F'''<unk_{i}>''' for i in range(2 , self.offset )]
_snake_case = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , mask_token_sent=lowerCAmelCase_ , offset=lowerCAmelCase_ , additional_special_tokens=lowerCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase_ , )
_snake_case = mask_token_sent
_snake_case = vocab_file
_snake_case = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowerCAmelCase_ )
# add special tokens to encoder dict
_snake_case = {
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
} )
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} )
_snake_case = {v: k for k, v in self.encoder.items()}
@property
def lowerCAmelCase ( self ) -> int:
return len(self.sp_model ) + self.offset
def lowerCAmelCase ( self ) -> Dict[str, int]:
_snake_case = {self.convert_ids_to_tokens(lowerCAmelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Tuple:
_snake_case = self.__dict__.copy()
_snake_case = None
return state
def __setstate__( self , lowerCAmelCase_ ) -> Optional[Any]:
_snake_case = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
_snake_case = {}
_snake_case = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> List[str]:
return self.sp_model.encode(lowerCAmelCase_ , out_type=lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> int:
if token in self.decoder:
return self.decoder[token]
elif token in self.added_tokens_decoder:
return self.added_tokens_decoder[token]
_snake_case = self.sp_model.piece_to_id(lowerCAmelCase_ )
return sp_id + self.offset
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> str:
if index in self.encoder:
return self.encoder[index]
elif index in self.added_tokens_encoder:
return self.added_tokens_encoder[index]
else:
_snake_case = self.sp_model.IdToPiece(index - self.offset )
return token
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> List[str]:
_snake_case = []
_snake_case = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(lowerCAmelCase_ ) + token
_snake_case = []
else:
current_sub_tokens.append(lowerCAmelCase_ )
out_string += self.sp_model.decode(lowerCAmelCase_ )
return out_string.strip()
def lowerCAmelCase ( self , lowerCAmelCase_=False ) -> int:
return 1
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> str:
_snake_case = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
return [1 if x in all_special_ids else 0 for x in seq]
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = False ) -> List[int]:
if already_has_special_tokens:
return self._special_token_mask(lowerCAmelCase_ )
elif token_ids_a is None:
return self._special_token_mask(lowerCAmelCase_ ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=None ) -> List[int]:
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> Tuple[str]:
if not os.path.isdir(lowerCAmelCase_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
_snake_case = os.path.join(
lowerCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCAmelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase_ , 'wb' ) as fi:
_snake_case = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase_ )
return (out_vocab_file,)
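# A hypothetical round-trip, assuming this class is transformers'
# PegasusTokenizer; 'google/pegasus-xsum' is the checkpoint referenced in the
# vocab map at the top of this file.
if __name__ == '__main__':
    from transformers import PegasusTokenizer
    tok = PegasusTokenizer.from_pretrained('google/pegasus-xsum')
    ids = tok('PEGASUS uses gap-sentence pretraining.')['input_ids']
    print(tok.decode(ids, skip_special_tokens=True))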
| 295
| 1
|
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class UpperCamelCase_ :
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=13 , lowerCAmelCase_=7 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=99 , lowerCAmelCase_=32 , lowerCAmelCase_=2 , lowerCAmelCase_=4 , lowerCAmelCase_=37 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=512 , lowerCAmelCase_=16 , lowerCAmelCase_=2 , lowerCAmelCase_=0.02 , lowerCAmelCase_=False , lowerCAmelCase_=True , lowerCAmelCase_="None" , lowerCAmelCase_=3 , lowerCAmelCase_=4 , lowerCAmelCase_=None , ) -> List[str]:
_snake_case = parent
_snake_case = batch_size
_snake_case = seq_length
_snake_case = is_training
_snake_case = use_input_mask
_snake_case = use_token_type_ids
_snake_case = use_labels
_snake_case = vocab_size
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = max_position_embeddings
_snake_case = type_vocab_size
_snake_case = type_sequence_label_size
_snake_case = initializer_range
_snake_case = num_labels
_snake_case = num_choices
_snake_case = relative_attention
_snake_case = position_biased_input
_snake_case = pos_att_type
_snake_case = scope
def lowerCAmelCase ( self ) -> Any:
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_snake_case = None
if self.use_input_mask:
_snake_case = random_attention_mask([self.batch_size, self.seq_length] )
_snake_case = None
if self.use_token_type_ids:
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_snake_case = None
_snake_case = None
_snake_case = None
if self.use_labels:
_snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_snake_case = DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=lowerCAmelCase_ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[str]:
_snake_case = TFDebertaVaModel(config=lowerCAmelCase_ )
_snake_case = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_snake_case = [input_ids, input_mask]
_snake_case = model(lowerCAmelCase_ )
_snake_case = model(lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[str]:
_snake_case = TFDebertaVaForMaskedLM(config=lowerCAmelCase_ )
_snake_case = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_snake_case = model(lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> str:
_snake_case = self.num_labels
_snake_case = TFDebertaVaForSequenceClassification(config=lowerCAmelCase_ )
_snake_case = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_snake_case = model(lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Dict:
_snake_case = self.num_labels
_snake_case = TFDebertaVaForTokenClassification(config=lowerCAmelCase_ )
_snake_case = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_snake_case = model(lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Union[str, Any]:
_snake_case = TFDebertaVaForQuestionAnswering(config=lowerCAmelCase_ )
_snake_case = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_snake_case = model(lowerCAmelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase ( self ) -> int:
_snake_case = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case = config_and_inputs
_snake_case = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class UpperCamelCase_ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
lowerCAmelCase_ = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
lowerCAmelCase_ = (
{
'''feature-extraction''': TFDebertaVaModel,
'''fill-mask''': TFDebertaVaForMaskedLM,
'''question-answering''': TFDebertaVaForQuestionAnswering,
'''text-classification''': TFDebertaVaForSequenceClassification,
'''token-classification''': TFDebertaVaForTokenClassification,
'''zero-shot''': TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def lowerCAmelCase ( self ) -> str:
_snake_case = TFDebertaVaModelTester(self )
_snake_case = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=37 )
def lowerCAmelCase ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def lowerCAmelCase ( self ) -> Optional[Any]:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> List[str]:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> Optional[Any]:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> Any:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> Union[str, Any]:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase_ )
@slow
def lowerCAmelCase ( self ) -> Tuple:
_snake_case = TFDebertaVaModel.from_pretrained('kamalkraj/deberta-v2-xlarge' )
self.assertIsNotNone(lowerCAmelCase_ )
@require_tf
class UpperCamelCase_ ( unittest.TestCase ):
@unittest.skip(reason='Model not available yet' )
def lowerCAmelCase ( self ) -> int:
pass
@slow
def lowerCAmelCase ( self ) -> Any:
_snake_case = TFDebertaVaModel.from_pretrained('kamalkraj/deberta-v2-xlarge' )
_snake_case = tf.constant([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
_snake_case = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_snake_case = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )[0]
_snake_case = tf.constant(
[[[0.23_56, 0.19_48, 0.03_69], [-0.10_63, 0.35_86, -0.51_52], [-0.63_99, -0.02_59, -0.25_25]]] )
tf.debugging.assert_near(output[:, 1:4, 1:4] , lowerCAmelCase_ , atol=1E-4 )
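# A minimal, self-contained sketch (separate from the test above) of the
# slice-comparison pattern used in the integration test: tf.debugging.assert_near
# passes silently when every element agrees within the absolute tolerance.
# The tensors here are illustrative, not real model outputs.
import tensorflow as tf

_expected = tf.constant([[0.2356, 0.1948, 0.0369]])
_actual = _expected + 5e-5  # well inside atol=1e-4
tf.debugging.assert_near(_actual, _expected, atol=1e-4)  # raises nothing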
| 295
|
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
UpperCAmelCase_ = """\
@misc{wu2016googles,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
UpperCAmelCase_ = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
UpperCAmelCase_ = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
'google_bleu': google_bleu score
Examples:
Example 1:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase_ ( datasets.Metric ):
def lowerCAmelCase ( self ) -> MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
} ) , )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 1 , lowerCAmelCase_ = 4 , ) -> Dict[str, float]:
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=lowerCAmelCase_ , hypotheses=lowerCAmelCase_ , min_len=lowerCAmelCase_ , max_len=lowerCAmelCase_ )
}
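# A minimal usage sketch of the underlying nltk call this metric wraps; the
# tokenized sentences below are illustrative, not taken from the examples above.
from nltk.translate import gleu_score

_hypotheses = [["the", "cat", "sat", "on", "the", "mat"]]
_list_of_references = [[["the", "cat", "sat", "on", "a", "mat"]]]
_score = gleu_score.corpus_gleu(
    list_of_references=_list_of_references, hypotheses=_hypotheses, min_len=1, max_len=4
)
print(round(_score, 2))  # a float in [0, 1]; 1.0 means every n-gram matched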
| 295
| 1
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def lowerCamelCase__ ( UpperCamelCase__ : Optional[Any]=None ) -> List[Any]:
'''simple docstring'''
_snake_case = argparse.ArgumentParser(add_help=UpperCamelCase__ , allow_abbrev=UpperCamelCase__ )
# The main config parser
_snake_case = config_command_parser(UpperCamelCase__ )
# The subparser to add commands to
_snake_case = config_parser.add_subparsers(title='subcommands' , dest='subcommand' )
# Then add other parsers with the parent parser
default_command_parser(UpperCamelCase__ , parents=[parent_parser] )
update_command_parser(UpperCamelCase__ , parents=[parent_parser] )
return config_parser
def lowerCamelCase__ ( ) -> List[str]:
'''simple docstring'''
_snake_case = get_config_parser()
_snake_case = config_parser.parse_args()
if not hasattr(UpperCamelCase__ , 'func' ):
config_parser.print_help()
exit(1 )
# Run
args.func(UpperCamelCase__ )
if __name__ == "__main__":
main()
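# A minimal sketch of the subparser dispatch pattern used above, with a
# hypothetical "greet" command standing in for the real accelerate subcommands.
import argparse

def _build_parser():
    parser = argparse.ArgumentParser(add_help=True)
    sub = parser.add_subparsers(title="subcommands", dest="subcommand")
    greet = sub.add_parser("greet")
    greet.add_argument("--name", default="world")
    greet.set_defaults(func=lambda args: print(f"hello {args.name}"))
    return parser

if __name__ == "__main__":
    _parser = _build_parser()
    _args = _parser.parse_args()
    if not hasattr(_args, "func"):
        _parser.print_help()
        raise SystemExit(1)
    _args.func(_args)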
| 295
|
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
# See all BART models at https://huggingface.co/models?filter=bart
UpperCAmelCase_ = {
"""vocab_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""",
},
"""merges_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""",
},
}
UpperCAmelCase_ = {
"""facebook/bart-base""": 1024,
"""facebook/bart-large""": 1024,
"""facebook/bart-large-mnli""": 1024,
"""facebook/bart-large-cnn""": 1024,
"""facebook/bart-large-xsum""": 1024,
"""yjernite/bart_eli5""": 1024,
}
@lru_cache()
def lowerCamelCase__ ( ) -> Tuple:
'''simple docstring'''
_snake_case = (
list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
)
_snake_case = bs[:]
_snake_case = 0
for b in range(2**8 ):
if b not in bs:
bs.append(UpperCamelCase__ )
cs.append(2**8 + n )
n += 1
_snake_case = [chr(UpperCamelCase__ ) for n in cs]
return dict(zip(UpperCamelCase__ , UpperCamelCase__ ) )
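# A runnable sketch of the byte-to-unicode table the function above builds:
# every one of the 256 byte values maps to a distinct printable character,
# which is why byte-level BPE never needs an <unk> token.
def _bytes_to_unicode_sketch():
    bs = (
        list(range(ord("!"), ord("~") + 1))
        + list(range(ord("¡"), ord("¬") + 1))
        + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    return dict(zip(bs, [chr(c) for c in cs]))

_table = _bytes_to_unicode_sketch()
assert len(_table) == 256 and len(set(_table.values())) == 256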
def lowerCamelCase__ ( UpperCamelCase__ : Tuple ) -> int:
'''simple docstring'''
_snake_case = set()
_snake_case = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_snake_case = char
return pairs
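# Quick illustration of the pair extraction above: consecutive symbol pairs
# from a word tuple, exactly what the BPE merge loop below consumes.
_word = ("h", "e", "l", "l", "o")
assert set(zip(_word, _word[1:])) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}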
class UpperCamelCase_ ( _lowerCamelCase ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ['''input_ids''', '''attention_mask''']
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="replace" , lowerCAmelCase_="<s>" , lowerCAmelCase_="</s>" , lowerCAmelCase_="</s>" , lowerCAmelCase_="<s>" , lowerCAmelCase_="<unk>" , lowerCAmelCase_="<pad>" , lowerCAmelCase_="<mask>" , lowerCAmelCase_=False , **lowerCAmelCase_ , ) -> Tuple:
_snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else bos_token
_snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else eos_token
_snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else sep_token
_snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else cls_token
_snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else unk_token
_snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
_snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else mask_token
super().__init__(
errors=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ , **lowerCAmelCase_ , )
with open(lowerCAmelCase_ , encoding='utf-8' ) as vocab_handle:
_snake_case = json.load(lowerCAmelCase_ )
_snake_case = {v: k for k, v in self.encoder.items()}
_snake_case = errors # how to handle errors in decoding
_snake_case = bytes_to_unicode()
_snake_case = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCAmelCase_ , encoding='utf-8' ) as merges_handle:
_snake_case = merges_handle.read().split('\n' )[1:-1]
_snake_case = [tuple(merge.split() ) for merge in bpe_merges]
_snake_case = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
_snake_case = {}
_snake_case = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
_snake_case = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
def lowerCAmelCase ( self ) -> Any:
return len(self.encoder )
def lowerCAmelCase ( self ) -> Dict:
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Dict:
if token in self.cache:
return self.cache[token]
_snake_case = tuple(lowerCAmelCase_ )
_snake_case = get_pairs(lowerCAmelCase_ )
if not pairs:
return token
while True:
_snake_case = min(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : self.bpe_ranks.get(lowerCAmelCase_ , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
_snake_case , _snake_case = bigram
_snake_case = []
_snake_case = 0
while i < len(lowerCAmelCase_ ):
try:
_snake_case = word.index(lowerCAmelCase_ , lowerCAmelCase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_snake_case = j
if word[i] == first and i < len(lowerCAmelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_snake_case = tuple(lowerCAmelCase_ )
_snake_case = new_word
if len(lowerCAmelCase_ ) == 1:
break
else:
_snake_case = get_pairs(lowerCAmelCase_ )
_snake_case = ' '.join(lowerCAmelCase_ )
_snake_case = word
return word
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> List[Any]:
_snake_case = []
for token in re.findall(self.pat , lowerCAmelCase_ ):
_snake_case = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCAmelCase_ ).split(' ' ) )
return bpe_tokens
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> str:
return self.encoder.get(lowerCAmelCase_ , self.encoder.get(self.unk_token ) )
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Optional[int]:
return self.decoder.get(lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Union[str, Any]:
_snake_case = ''.join(lowerCAmelCase_ )
_snake_case = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
return text
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> Tuple[str]:
if not os.path.isdir(lowerCAmelCase_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
_snake_case = os.path.join(
lowerCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
_snake_case = os.path.join(
lowerCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(lowerCAmelCase_ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase_ , ensure_ascii=lowerCAmelCase_ ) + '\n' )
_snake_case = 0
with open(lowerCAmelCase_ , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
' Please check that the tokenizer is not corrupted!' )
_snake_case = token_index
writer.write(' '.join(lowerCAmelCase_ ) + '\n' )
index += 1
return vocab_file, merge_file
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_snake_case = [self.cls_token_id]
_snake_case = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase_ , token_ids_a=lowerCAmelCase_ , already_has_special_tokens=lowerCAmelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase_ )) + [1]
return [1] + ([0] * len(lowerCAmelCase_ )) + [1, 1] + ([0] * len(lowerCAmelCase_ )) + [1]
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> List[int]:
_snake_case = [self.sep_token_id]
_snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=False , **lowerCAmelCase_ ) -> str:
_snake_case = kwargs.pop('add_prefix_space' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCAmelCase_ ) > 0 and not text[0].isspace()):
_snake_case = ' ' + text
return (text, kwargs)
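# A usage sketch for the byte-level BPE tokenizer above; it assumes the
# `transformers` package is installed and the facebook/bart-base checkpoint
# can be fetched from the hub.
from transformers import BartTokenizer

_tok = BartTokenizer.from_pretrained("facebook/bart-base")
_ids = _tok("Hello world")["input_ids"]
assert _ids[0] == _tok.cls_token_id and _ids[-1] == _tok.sep_token_id
print(_tok.decode(_ids))  # <s>Hello world</s>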
| 295
| 1
|
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCamelCase_ ( _lowerCamelCase ):
def lowerCAmelCase ( self ) -> List[Any]:
_snake_case = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCAmelCase_ , 'embed_dim' ) )
self.parent.assertTrue(hasattr(lowerCAmelCase_ , 'num_heads' ) )
class UpperCamelCase_ :
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=13 , lowerCAmelCase_=64 , lowerCAmelCase_=3 , lowerCAmelCase_=[16, 48, 96] , lowerCAmelCase_=[1, 3, 6] , lowerCAmelCase_=[1, 2, 10] , lowerCAmelCase_=[7, 3, 3] , lowerCAmelCase_=[4, 2, 2] , lowerCAmelCase_=[2, 1, 1] , lowerCAmelCase_=[2, 2, 2] , lowerCAmelCase_=[False, False, True] , lowerCAmelCase_=[0.0, 0.0, 0.0] , lowerCAmelCase_=0.02 , lowerCAmelCase_=1E-12 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=2 , ) -> Tuple:
_snake_case = parent
_snake_case = batch_size
_snake_case = image_size
_snake_case = patch_sizes
_snake_case = patch_stride
_snake_case = patch_padding
_snake_case = is_training
_snake_case = use_labels
_snake_case = num_labels
_snake_case = num_channels
_snake_case = embed_dim
_snake_case = num_heads
_snake_case = stride_kv
_snake_case = depth
_snake_case = cls_token
_snake_case = attention_drop_rate
_snake_case = initializer_range
_snake_case = layer_norm_eps
def lowerCAmelCase ( self ) -> Union[str, Any]:
_snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case = None
if self.use_labels:
# create a random int32 tensor of given shape
_snake_case = ids_tensor([self.batch_size] , self.num_labels )
_snake_case = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase ( self ) -> Dict:
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Tuple:
_snake_case = TFCvtModel(config=lowerCAmelCase_ )
_snake_case = model(lowerCAmelCase_ , training=lowerCAmelCase_ )
_snake_case = (self.image_size, self.image_size)
_snake_case , _snake_case = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
_snake_case = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
_snake_case = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> str:
_snake_case = self.num_labels
_snake_case = TFCvtForImageClassification(lowerCAmelCase_ )
_snake_case = model(lowerCAmelCase_ , labels=lowerCAmelCase_ , training=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self ) -> List[Any]:
_snake_case = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case = config_and_inputs
_snake_case = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class UpperCamelCase_ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
lowerCAmelCase_ = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
lowerCAmelCase_ = (
{'''feature-extraction''': TFCvtModel, '''image-classification''': TFCvtForImageClassification}
if is_tf_available()
else {}
)
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def lowerCAmelCase ( self ) -> int:
_snake_case = TFCvtModelTester(self )
_snake_case = TFCvtConfigTester(self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ , hidden_size=37 )
def lowerCAmelCase ( self ) -> Dict:
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason='Cvt does not output attentions' )
def lowerCAmelCase ( self ) -> List[Any]:
pass
@unittest.skip(reason='Cvt does not use inputs_embeds' )
def lowerCAmelCase ( self ) -> str:
pass
@unittest.skip(reason='Cvt does not support input and output embeddings' )
def lowerCAmelCase ( self ) -> Optional[Any]:
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , )
def lowerCAmelCase ( self ) -> Union[str, Any]:
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , )
@slow
def lowerCAmelCase ( self ) -> Dict:
super().test_keras_fit()
@unittest.skip(reason='Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8' )
def lowerCAmelCase ( self ) -> Union[str, Any]:
_snake_case = tf.keras.mixed_precision.Policy('mixed_float16' )
tf.keras.mixed_precision.set_global_policy(lowerCAmelCase_ )
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy('float32' )
def lowerCAmelCase ( self ) -> Optional[int]:
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(lowerCAmelCase_ )
_snake_case = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case = [*signature.parameters.keys()]
_snake_case = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> Union[str, Any]:
def check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case = model_class(lowerCAmelCase_ )
_snake_case = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) )
_snake_case = outputs.hidden_states
_snake_case = len(self.model_tester.depth )
self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_snake_case = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> str:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> Tuple:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase_ )
@slow
def lowerCAmelCase ( self ) -> List[str]:
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case = TFCvtModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
def lowerCamelCase__ ( ) -> List[str]:
'''simple docstring'''
_snake_case = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class UpperCamelCase_ ( unittest.TestCase ):
@cached_property
def lowerCAmelCase ( self ) -> Tuple:
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def lowerCAmelCase ( self ) -> int:
_snake_case = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
_snake_case = self.default_image_processor
_snake_case = prepare_img()
_snake_case = image_processor(images=lowerCAmelCase_ , return_tensors='tf' )
# forward pass
_snake_case = model(**lowerCAmelCase_ )
# verify the logits
_snake_case = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase_ )
_snake_case = tf.constant([0.92_85, 0.90_15, -0.31_50] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , lowerCAmelCase_ , atol=1E-4 ) )
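# A minimal sketch of the convolutional output-size arithmetic used in
# create_and_check_model above: out = floor((in + 2 * pad - kernel) / stride) + 1.
from math import floor

def _conv_out(size, pad, kernel, stride):
    return floor((size + 2 * pad - kernel) / stride) + 1

# stage 0 of the tester defaults: 64x64 input, kernel 7, stride 4, padding 2
assert _conv_out(64, 2, 7, 4) == 16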
| 295
|
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
UpperCAmelCase_ = logging.get_logger(__name__)
logging.set_verbosity_info()
def lowerCamelCase__ ( UpperCamelCase__ : str , UpperCamelCase__ : str ) -> Union[str, Any]:
'''simple docstring'''
if "xprophetnet" in prophetnet_checkpoint_path:
_snake_case = XLMProphetNetForConditionalGenerationOld.from_pretrained(UpperCamelCase__ )
_snake_case , _snake_case = XLMProphetNetForConditionalGeneration.from_pretrained(
UpperCamelCase__ , output_loading_info=UpperCamelCase__ )
else:
_snake_case = ProphetNetForConditionalGenerationOld.from_pretrained(UpperCamelCase__ )
_snake_case , _snake_case = ProphetNetForConditionalGeneration.from_pretrained(
UpperCamelCase__ , output_loading_info=UpperCamelCase__ )
_snake_case = ['key_proj', 'value_proj', 'query_proj']
_snake_case = {
'self_attn': 'ngram_self_attn',
'cross_attn': 'encoder_attn',
'cross_attn_layer_norm': 'encoder_attn_layer_norm',
'feed_forward_layer_norm': 'final_layer_norm',
'feed_forward': '',
'intermediate': 'fc1',
'output': 'fc2',
'key_proj': 'k_proj',
'query_proj': 'q_proj',
'value_proj': 'v_proj',
'word_embeddings': 'embed_tokens',
'embeddings_layer_norm': 'emb_layer_norm',
'relative_pos_embeddings': 'relative_linear',
'ngram_embeddings': 'ngram_input_embed',
'position_embeddings': 'embed_positions',
}
for key in loading_info["missing_keys"]:
_snake_case = key.split('.' )
if attributes[0] == "lm_head":
_snake_case = prophet
_snake_case = prophet_old
else:
_snake_case = prophet.prophetnet
_snake_case = prophet_old.model
_snake_case = False
for attribute in attributes:
if attribute in mapping:
_snake_case = mapping[attribute]
if not hasattr(UpperCamelCase__ , UpperCamelCase__ ) and len(UpperCamelCase__ ) > 0:
_snake_case = attribute
elif hasattr(UpperCamelCase__ , UpperCamelCase__ ):
_snake_case = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
_snake_case = old_model.weight
logger.info(F'''{attribute} is initialized.''' )
_snake_case = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
_snake_case = old_model.bias
logger.info(F'''{attribute} is initialized''' )
_snake_case = True
break
elif attribute in special_keys and hasattr(UpperCamelCase__ , 'in_proj_weight' ):
_snake_case = old_model.in_proj_weight.shape[0] // 3
_snake_case = getattr(UpperCamelCase__ , UpperCamelCase__ )
assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
_snake_case = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
_snake_case = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
_snake_case = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
_snake_case = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
_snake_case = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
_snake_case = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
_snake_case = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
_snake_case = nn.Parameter(old_model.embed_positions.weight[:512, :] )
_snake_case = True
break
if attribute.isdigit():
_snake_case = model[int(UpperCamelCase__ )]
_snake_case = old_model[int(UpperCamelCase__ )]
else:
_snake_case = getattr(UpperCamelCase__ , UpperCamelCase__ )
if old_attribute == "":
_snake_case = old_model
else:
if not hasattr(UpperCamelCase__ , UpperCamelCase__ ):
raise ValueError(F'''{old_model} does not have {old_attribute}''' )
_snake_case = getattr(UpperCamelCase__ , UpperCamelCase__ )
if not is_key_init:
raise ValueError(F'''{key} was not correctly initialized!''' )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
prophet.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
UpperCAmelCase_ = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
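# A toy-dimension sketch of the fused in_proj slicing performed in the
# special_keys branch above: the first third of the rows is the query
# projection, the middle third the key, and the last third the value.
import torch
from torch import nn

_embed_dim = 4
_in_proj_weight = torch.randn(3 * _embed_dim, _embed_dim)
_q = nn.Parameter(_in_proj_weight[:_embed_dim, :])
_k = nn.Parameter(_in_proj_weight[_embed_dim : 2 * _embed_dim, :])
_v = nn.Parameter(_in_proj_weight[2 * _embed_dim :, :])
assert _q.shape == _k.shape == _v.shape == (_embed_dim, _embed_dim)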
| 295
| 1
|
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class UpperCamelCase_ ( _lowerCamelCase ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=13 , lowerCAmelCase_=7 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=True , lowerCAmelCase_=99 , lowerCAmelCase_=32 , lowerCAmelCase_=5 , lowerCAmelCase_=4 , lowerCAmelCase_=37 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=512 , lowerCAmelCase_=16 , lowerCAmelCase_=2 , lowerCAmelCase_=0.02 , lowerCAmelCase_=3 , lowerCAmelCase_=4 , lowerCAmelCase_=None , ) -> str:
_snake_case = parent
_snake_case = batch_size
_snake_case = seq_length
_snake_case = is_training
_snake_case = use_input_mask
_snake_case = use_token_type_ids
_snake_case = use_labels
_snake_case = vocab_size
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = max_position_embeddings
_snake_case = type_vocab_size
_snake_case = type_sequence_label_size
_snake_case = initializer_range
_snake_case = num_labels
_snake_case = num_choices
_snake_case = scope
def lowerCAmelCase ( self ) -> Optional[Any]:
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_snake_case = None
if self.use_input_mask:
_snake_case = random_attention_mask([self.batch_size, self.seq_length] )
_snake_case = None
_snake_case = None
_snake_case = None
if self.use_labels:
_snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_snake_case = ids_tensor([self.batch_size] , self.num_choices )
_snake_case = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase ( self ) -> List[str]:
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[str]:
_snake_case = DistilBertModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_snake_case = model(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = model(lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Dict:
_snake_case = DistilBertForMaskedLM(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_snake_case = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> str:
_snake_case = DistilBertForQuestionAnswering(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_snake_case = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> int:
_snake_case = self.num_labels
_snake_case = DistilBertForSequenceClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_snake_case = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Union[str, Any]:
_snake_case = self.num_labels
_snake_case = DistilBertForTokenClassification(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_snake_case = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]:
_snake_case = self.num_choices
_snake_case = DistilBertForMultipleChoice(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_snake_case = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_snake_case = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_snake_case = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , labels=lowerCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase ( self ) -> str:
_snake_case = self.prepare_config_and_inputs()
((_snake_case) , (_snake_case) , (_snake_case) , (_snake_case) , (_snake_case) , (_snake_case)) = config_and_inputs
_snake_case = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase_ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
lowerCAmelCase_ = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
lowerCAmelCase_ = (
{
'''feature-extraction''': DistilBertModel,
'''fill-mask''': DistilBertForMaskedLM,
'''question-answering''': DistilBertForQuestionAnswering,
'''text-classification''': DistilBertForSequenceClassification,
'''token-classification''': DistilBertForTokenClassification,
'''zero-shot''': DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase_ = True
lowerCAmelCase_ = True
lowerCAmelCase_ = True
lowerCAmelCase_ = True
def lowerCAmelCase ( self ) -> Tuple:
_snake_case = DistilBertModelTester(self )
_snake_case = ConfigTester(self , config_class=lowerCAmelCase_ , dim=37 )
def lowerCAmelCase ( self ) -> Dict:
self.config_tester.run_common_tests()
def lowerCAmelCase ( self ) -> str:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> Tuple:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> List[Any]:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> Dict:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> List[str]:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> Any:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*lowerCAmelCase_ )
@slow
def lowerCAmelCase ( self ) -> Any:
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case = DistilBertModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
@slow
@require_torch_gpu
def lowerCAmelCase ( self ) -> Any:
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
_snake_case = True
_snake_case = model_class(config=lowerCAmelCase_ )
_snake_case = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = torch.jit.trace(
lowerCAmelCase_ , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(lowerCAmelCase_ , os.path.join(lowerCAmelCase_ , 'traced_model.pt' ) )
_snake_case = torch.jit.load(os.path.join(lowerCAmelCase_ , 'traced_model.pt' ) , map_location=lowerCAmelCase_ )
loaded(inputs_dict['input_ids'].to(lowerCAmelCase_ ) , inputs_dict['attention_mask'].to(lowerCAmelCase_ ) )
@require_torch
class UpperCamelCase_ ( unittest.TestCase ):
@slow
def lowerCAmelCase ( self ) -> Dict:
_snake_case = DistilBertModel.from_pretrained('distilbert-base-uncased' )
_snake_case = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
_snake_case = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_snake_case = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )[0]
_snake_case = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , lowerCAmelCase_ )
_snake_case = torch.tensor(
[[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCAmelCase_ , atol=1E-4 ) )
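# A minimal sketch of the torch.jit.trace save/load round trip exercised by
# the GPU test above, using a tiny stand-in module instead of DistilBert.
import os
import tempfile
import torch

_module = torch.nn.Linear(4, 2)
_example = torch.randn(1, 4)
_traced = torch.jit.trace(_module, _example)
with tempfile.TemporaryDirectory() as tmp:
    torch.jit.save(_traced, os.path.join(tmp, "traced_model.pt"))
    _loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"))
    assert torch.allclose(_loaded(_example), _module(_example))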
| 295
|
import random
def lowerCamelCase__ ( UpperCamelCase__ : int , UpperCamelCase__ : float , UpperCamelCase__ : bool = False ) -> dict:
'''simple docstring'''
_snake_case = {i: [] for i in range(UpperCamelCase__ )}
# if probability is greater than or equal to 1, generate a complete graph
if probability >= 1:
return complete_graph(UpperCamelCase__ )
# if probability is less than or equal to 0, return a graph without edges
if probability <= 0:
return graph
# for each pair of nodes (i, j), add an edge from i to j
# if the randomly generated number is below the given probability
for i in range(UpperCamelCase__ ):
for j in range(i + 1 , UpperCamelCase__ ):
if random.random() < probability:
graph[i].append(UpperCamelCase__ )
if not directed:
# if the graph is undirected, also add the reverse edge from j to i
graph[j].append(UpperCamelCase__ )
return graph
def lowerCamelCase__ ( UpperCamelCase__ : int ) -> dict:
'''simple docstring'''
return {
i: [j for j in range(UpperCamelCase__ ) if i != j] for i in range(UpperCamelCase__ )
}
if __name__ == "__main__":
import doctest
doctest.testmod()
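# A usage sketch for the generators above, assuming the two functions are
# exposed as random_graph(vertices, probability, directed=False) and
# complete_graph(vertices); seeded for reproducibility.
# import random
# random.seed(0)
# print(random_graph(5, 0.5))  # adjacency lists with roughly half the edges
# print(complete_graph(3))     # {0: [1, 2], 1: [0, 2], 2: [0, 1]}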
| 295
| 1
|
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCamelCase__ ( UpperCamelCase__ : int , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
_snake_case = BigBirdConfig.from_json_file(UpperCamelCase__ )
print(F'''Building PyTorch model from configuration: {config}''' )
if is_trivia_qa:
_snake_case = BigBirdForQuestionAnswering(UpperCamelCase__ )
else:
_snake_case = BigBirdForPreTraining(UpperCamelCase__ )
# Load weights from tf checkpoint
load_tf_weights_in_big_bird(UpperCamelCase__ , UpperCamelCase__ , is_trivia_qa=UpperCamelCase__ )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--big_bird_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_trivia_qa""", action="""store_true""", help="""Whether to convert a model with a trivia_qa head."""
)
UpperCAmelCase_ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
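# A hypothetical invocation of the conversion script above (the script file
# name and all paths are placeholders, not real checkpoints):
# python convert_bigbird_tf_checkpoint_to_pytorch.py \
#   --tf_checkpoint_path ./bigbird/model.ckpt \
#   --big_bird_config_file ./bigbird/config.json \
#   --pytorch_dump_path ./bigbird-pytorch \
#   --is_trivia_qa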
| 295
|
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class UpperCamelCase_ :
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ = 13 , lowerCAmelCase_ = 64 , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 3 , lowerCAmelCase_ = 3 , lowerCAmelCase_ = True , lowerCAmelCase_ = True , lowerCAmelCase_ = 128 , lowerCAmelCase_=[16, 32, 64, 128] , lowerCAmelCase_ = 7 , lowerCAmelCase_ = 4 , lowerCAmelCase_ = 37 , lowerCAmelCase_ = "gelu" , lowerCAmelCase_ = 0.1 , lowerCAmelCase_ = 0.1 , lowerCAmelCase_ = 10 , lowerCAmelCase_ = 0.02 , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 1 , lowerCAmelCase_ = 128 , lowerCAmelCase_ = [2, 2, 2, 2] , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 2 , ) -> Dict:
_snake_case = parent
_snake_case = batch_size
_snake_case = image_size
_snake_case = patch_size
_snake_case = num_channels
_snake_case = is_training
_snake_case = use_labels
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = type_sequence_label_size
_snake_case = initializer_range
_snake_case = encoder_stride
_snake_case = num_attention_outputs
_snake_case = embed_dim
_snake_case = embed_dim + 1
_snake_case = resolution
_snake_case = depths
_snake_case = hidden_sizes
_snake_case = dim
_snake_case = mlp_expansion_ratio
def lowerCAmelCase ( self ) -> Optional[Any]:
_snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case = None
if self.use_labels:
_snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_snake_case = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase ( self ) -> Tuple:
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]:
_snake_case = TFEfficientFormerModel(config=lowerCAmelCase_ )
_snake_case = model(lowerCAmelCase_ , training=lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Any:
_snake_case = self.type_sequence_label_size
_snake_case = TFEfficientFormerForImageClassification(lowerCAmelCase_ )
_snake_case = model(lowerCAmelCase_ , labels=lowerCAmelCase_ , training=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_snake_case = 1
_snake_case = TFEfficientFormerForImageClassification(lowerCAmelCase_ )
_snake_case = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_snake_case = model(lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCAmelCase ( self ) -> List[str]:
_snake_case = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case = config_and_inputs
_snake_case = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class UpperCamelCase_ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
lowerCAmelCase_ = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
lowerCAmelCase_ = (
{
'''feature-extraction''': TFEfficientFormerModel,
'''image-classification''': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def lowerCAmelCase ( self ) -> str:
_snake_case = TFEfficientFormerModelTester(self )
_snake_case = ConfigTester(
self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ , hidden_size=37 )
def lowerCAmelCase ( self ) -> str:
self.config_tester.run_common_tests()
@unittest.skip(reason='EfficientFormer does not use inputs_embeds' )
def lowerCAmelCase ( self ) -> int:
pass
@unittest.skip(reason='EfficientFormer does not support input and output embeddings' )
def lowerCAmelCase ( self ) -> Optional[Any]:
pass
def lowerCAmelCase ( self ) -> str:
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(lowerCAmelCase_ )
_snake_case = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case = [*signature.parameters.keys()]
_snake_case = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> Optional[Any]:
def check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case = model_class(lowerCAmelCase_ )
_snake_case = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) , training=lowerCAmelCase_ )
_snake_case = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_snake_case = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ )
if hasattr(self.model_tester , 'encoder_seq_length' ):
_snake_case = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , 'chunk_length' ) and self.model_tester.chunk_length > 1:
_snake_case = seq_length * self.model_tester.chunk_length
else:
_snake_case = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
_snake_case = outputs.decoder_hidden_states
self.assertIsInstance(lowerCAmelCase_ , (list, tuple) )
self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ )
_snake_case = getattr(self.model_tester , 'seq_length' , lowerCAmelCase_ )
_snake_case = getattr(self.model_tester , 'decoder_seq_length' , lowerCAmelCase_ )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_snake_case = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False ) -> List[Any]:
_snake_case = super()._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def lowerCAmelCase ( self ) -> Dict:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
@unittest.skip(reason='EfficientFormer does not implement masked image modeling yet' )
def lowerCAmelCase ( self ) -> Dict:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> List[Any]:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase_ )
@slow
def lowerCAmelCase ( self ) -> str:
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case = TFEfficientFormerModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> List[str]:
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case = True
_snake_case = getattr(self.model_tester , 'seq_length' , lowerCAmelCase_ )
_snake_case = getattr(self.model_tester , 'encoder_seq_length' , lowerCAmelCase_ )
_snake_case = getattr(self.model_tester , 'key_length' , lowerCAmelCase_ )
_snake_case = getattr(self.model_tester , 'chunk_length' , lowerCAmelCase_ )
if chunk_length is not None and hasattr(self.model_tester , 'num_hashes' ):
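            # Copied from the common tests: Reformer-style LSH attention repeats the
            # sequence once per hash round, hence the multiplication below.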
_snake_case = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
_snake_case = True
_snake_case = False
_snake_case = True
_snake_case = model_class(lowerCAmelCase_ )
_snake_case = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) , training=lowerCAmelCase_ )
_snake_case = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowerCAmelCase_ ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_snake_case = True
_snake_case = model_class(lowerCAmelCase_ )
_snake_case = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) , training=lowerCAmelCase_ )
_snake_case = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowerCAmelCase_ ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def lowerCAmelCase ( self ) -> Dict:
# We use a simplified version of this test for EfficientFormer because it requires training=False
# and Keras refuses to let us force that during functional construction
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
_snake_case = model_class(lowerCAmelCase_ )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
_snake_case = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=lowerCAmelCase_ )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
_snake_case = model(lowerCAmelCase_ )
self.assertTrue(outputs_dict is not None )
def lowerCamelCase__ ( ) -> List[str]:
'''simple docstring'''
_snake_case = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class UpperCamelCase_ ( unittest.TestCase ):
@cached_property
def lowerCAmelCase ( self ) -> Dict:
return (
EfficientFormerImageProcessor.from_pretrained('snap-research/efficientformer-l1-300' )
if is_vision_available()
else None
)
@slow
def lowerCAmelCase ( self ) -> Union[str, Any]:
_snake_case = TFEfficientFormerForImageClassification.from_pretrained('snap-research/efficientformer-l1-300' )
_snake_case = self.default_image_processor
_snake_case = prepare_img()
_snake_case = image_processor(images=lowerCAmelCase_ , return_tensors='tf' )
# forward pass
_snake_case = model(**lowerCAmelCase_ , training=lowerCAmelCase_ )
# verify the logits
_snake_case = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase_ )
_snake_case = tf.constant([-0.05_55, 0.48_25, -0.08_52] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1E-4 ) )
@slow
def lowerCAmelCase ( self ) -> str:
_snake_case = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
'snap-research/efficientformer-l1-300' )
_snake_case = self.default_image_processor
_snake_case = prepare_img()
_snake_case = image_processor(images=lowerCAmelCase_ , return_tensors='tf' )
# forward pass
_snake_case = model(**lowerCAmelCase_ , training=lowerCAmelCase_ )
# verify the logits
_snake_case = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase_ )
_snake_case = tf.constant([-0.13_12, 0.43_53, -1.04_99] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1E-4 ) )
| 295
| 1
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class UpperCamelCase_ ( _lowerCamelCase ):
lowerCAmelCase_ = 42
class UpperCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
@register_to_config
def __init__( self , lowerCAmelCase_ = 3 , lowerCAmelCase_ = 3 , lowerCAmelCase_ = ("DownEncoderBlock2D",) , lowerCAmelCase_ = ("UpDecoderBlock2D",) , lowerCAmelCase_ = (64,) , lowerCAmelCase_ = 1 , lowerCAmelCase_ = "silu" , lowerCAmelCase_ = 3 , lowerCAmelCase_ = 32 , lowerCAmelCase_ = 256 , lowerCAmelCase_ = 32 , lowerCAmelCase_ = None , lowerCAmelCase_ = 0.1_82_15 , lowerCAmelCase_ = "group" , ) -> Tuple:
super().__init__()
# pass init params to Encoder
_snake_case = Encoder(
in_channels=lowerCAmelCase_ , out_channels=lowerCAmelCase_ , down_block_types=lowerCAmelCase_ , block_out_channels=lowerCAmelCase_ , layers_per_block=lowerCAmelCase_ , act_fn=lowerCAmelCase_ , norm_num_groups=lowerCAmelCase_ , double_z=lowerCAmelCase_ , )
_snake_case = vq_embed_dim if vq_embed_dim is not None else latent_channels
        _snake_case = nn.Conv2d(lowerCAmelCase_ , lowerCAmelCase_ , 1 )
_snake_case = VectorQuantizer(lowerCAmelCase_ , lowerCAmelCase_ , beta=0.25 , remap=lowerCAmelCase_ , sane_index_shape=lowerCAmelCase_ )
        _snake_case = nn.Conv2d(lowerCAmelCase_ , lowerCAmelCase_ , 1 )
# pass init params to Decoder
_snake_case = Decoder(
in_channels=lowerCAmelCase_ , out_channels=lowerCAmelCase_ , up_block_types=lowerCAmelCase_ , block_out_channels=lowerCAmelCase_ , layers_per_block=lowerCAmelCase_ , act_fn=lowerCAmelCase_ , norm_num_groups=lowerCAmelCase_ , norm_type=lowerCAmelCase_ , )
@apply_forward_hook
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = True ) -> VQEncoderOutput:
_snake_case = self.encoder(lowerCAmelCase_ )
_snake_case = self.quant_conv(lowerCAmelCase_ )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=lowerCAmelCase_ )
@apply_forward_hook
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = False , lowerCAmelCase_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
# also go through quantization layer
if not force_not_quantize:
_snake_case , _snake_case , _snake_case = self.quantize(lowerCAmelCase_ )
else:
_snake_case = h
_snake_case = self.post_quant_conv(lowerCAmelCase_ )
_snake_case = self.decoder(lowerCAmelCase_ , quant if self.config.norm_type == 'spatial' else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
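        # Full autoencoding pass: encode the sample to latents, then decode (which also
        # runs the quantizer) to reconstruct it.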
_snake_case = sample
_snake_case = self.encode(lowerCAmelCase_ ).latents
_snake_case = self.decode(lowerCAmelCase_ ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCAmelCase_ )
| 295
|
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCamelCase_ ( _lowerCamelCase , unittest.TestCase ):
lowerCAmelCase_ = LEDTokenizer
lowerCAmelCase_ = LEDTokenizerFast
lowerCAmelCase_ = True
def lowerCAmelCase ( self ) -> List[str]:
super().setUp()
_snake_case = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
_snake_case = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
_snake_case = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
_snake_case = {'unk_token': '<unk>'}
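        # A minimal GPT-2-style byte-level BPE vocabulary and merge list; both are
        # written to disk below so the tokenizers under test can load them.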
_snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
_snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(lowerCAmelCase_ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(lowerCAmelCase_ ) )
def lowerCAmelCase ( self , **lowerCAmelCase_ ) -> List[str]:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def lowerCAmelCase ( self , **lowerCAmelCase_ ) -> str:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Union[str, Any]:
return "lower newer", "lower newer"
@cached_property
def lowerCAmelCase ( self ) -> Optional[Any]:
return LEDTokenizer.from_pretrained('allenai/led-base-16384' )
@cached_property
def lowerCAmelCase ( self ) -> Union[str, Any]:
return LEDTokenizerFast.from_pretrained('allenai/led-base-16384' )
@require_torch
def lowerCAmelCase ( self ) -> Union[str, Any]:
_snake_case = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_snake_case = [0, 250, 251, 1_7818, 13, 3_9186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_snake_case = tokenizer(lowerCAmelCase_ , max_length=len(lowerCAmelCase_ ) , padding=lowerCAmelCase_ , return_tensors='pt' )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
_snake_case = batch.input_ids.tolist()[0]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
@require_torch
def lowerCAmelCase ( self ) -> Optional[Any]:
_snake_case = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_snake_case = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors='pt' )
self.assertIn('input_ids' , lowerCAmelCase_ )
self.assertIn('attention_mask' , lowerCAmelCase_ )
self.assertNotIn('labels' , lowerCAmelCase_ )
self.assertNotIn('decoder_attention_mask' , lowerCAmelCase_ )
@require_torch
def lowerCAmelCase ( self ) -> Optional[int]:
_snake_case = [
'Summary of the text.',
'Another summary.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_snake_case = tokenizer(text_target=lowerCAmelCase_ , max_length=32 , padding='max_length' , return_tensors='pt' )
self.assertEqual(32 , targets['input_ids'].shape[1] )
@require_torch
def lowerCAmelCase ( self ) -> List[str]:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_snake_case = tokenizer(
['I am a small frog' * 1024, 'I am a small frog'] , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , return_tensors='pt' )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual(batch.input_ids.shape , (2, 5122) )
@require_torch
def lowerCAmelCase ( self ) -> Union[str, Any]:
_snake_case = ['A long paragraph for summarization.']
_snake_case = [
'Summary of the text.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_snake_case = tokenizer(lowerCAmelCase_ , return_tensors='pt' )
_snake_case = tokenizer(text_target=lowerCAmelCase_ , return_tensors='pt' )
_snake_case = inputs['input_ids']
_snake_case = targets['input_ids']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def lowerCAmelCase ( self ) -> List[str]:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_snake_case = ['Summary of the text.', 'Another summary.']
_snake_case = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
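            # `pad` is expected to extend global_attention_mask with -1 at the padded positions.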
_snake_case = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ )
_snake_case = [[0] * len(lowerCAmelCase_ ) for x in encoded_output['input_ids']]
_snake_case = tokenizer.pad(lowerCAmelCase_ )
self.assertSequenceEqual(outputs['global_attention_mask'] , lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> Tuple:
pass
def lowerCAmelCase ( self ) -> str:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_snake_case = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
_snake_case = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
_snake_case = 'A, <mask> AllenNLP sentence.'
_snake_case = tokenizer_r.encode_plus(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ )
_snake_case = tokenizer_p.encode_plus(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ )
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
_snake_case = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
_snake_case = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
lowerCAmelCase_ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
lowerCAmelCase_ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
| 295
| 1
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def lowerCamelCase__ ( UpperCamelCase__ : Tuple ) -> Dict:
'''simple docstring'''
_snake_case = 384
if "tiny" in model_name:
_snake_case = [3, 3, 9, 3]
_snake_case = [96, 192, 384, 768]
if "small" in model_name:
_snake_case = [3, 3, 27, 3]
_snake_case = [96, 192, 384, 768]
if "base" in model_name:
_snake_case = [3, 3, 27, 3]
_snake_case = [128, 256, 512, 1_024]
_snake_case = 512
if "large" in model_name:
_snake_case = [3, 3, 27, 3]
_snake_case = [192, 384, 768, 1_536]
_snake_case = 768
if "xlarge" in model_name:
_snake_case = [3, 3, 27, 3]
_snake_case = [256, 512, 1_024, 2_048]
_snake_case = 1_024
# set label information
_snake_case = 150
_snake_case = 'huggingface/label-files'
_snake_case = 'ade20k-id2label.json'
_snake_case = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type='dataset' ) , 'r' ) )
_snake_case = {int(UpperCamelCase__ ): v for k, v in idalabel.items()}
_snake_case = {v: k for k, v in idalabel.items()}
_snake_case = ConvNextConfig(
depths=UpperCamelCase__ , hidden_sizes=UpperCamelCase__ , out_features=['stage1', 'stage2', 'stage3', 'stage4'] )
_snake_case = UperNetConfig(
        backbone_config=UpperCamelCase__ , auxiliary_in_channels=UpperCamelCase__ , num_labels=UpperCamelCase__ , id2label=UpperCamelCase__ , label2id=UpperCamelCase__ , )
return config
def lowerCamelCase__ ( UpperCamelCase__ : Optional[int] ) -> str:
'''simple docstring'''
_snake_case = []
# fmt: off
# stem
rename_keys.append(('backbone.downsample_layers.0.0.weight', 'backbone.embeddings.patch_embeddings.weight') )
rename_keys.append(('backbone.downsample_layers.0.0.bias', 'backbone.embeddings.patch_embeddings.bias') )
rename_keys.append(('backbone.downsample_layers.0.1.weight', 'backbone.embeddings.layernorm.weight') )
rename_keys.append(('backbone.downsample_layers.0.1.bias', 'backbone.embeddings.layernorm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'''backbone.stages.{i}.{j}.gamma''', F'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.depthwise_conv.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.depthwise_conv.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.norm.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.norm.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') )
if i > 0:
rename_keys.append((F'''backbone.downsample_layers.{i}.0.weight''', F'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') )
rename_keys.append((F'''backbone.downsample_layers.{i}.0.bias''', F'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') )
rename_keys.append((F'''backbone.downsample_layers.{i}.1.weight''', F'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') )
rename_keys.append((F'''backbone.downsample_layers.{i}.1.bias''', F'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') )
rename_keys.append((F'''backbone.norm{i}.weight''', F'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((F'''backbone.norm{i}.bias''', F'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
('decode_head.conv_seg.weight', 'decode_head.classifier.weight'),
('decode_head.conv_seg.bias', 'decode_head.classifier.bias'),
('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'),
('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'),
] )
# fmt: on
return rename_keys
def lowerCamelCase__ ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
_snake_case = dct.pop(UpperCamelCase__ )
_snake_case = val
def lowerCamelCase__ ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Any] ) -> Dict:
'''simple docstring'''
_snake_case = {
'upernet-convnext-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth',
'upernet-convnext-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth',
'upernet-convnext-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth',
'upernet-convnext-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth',
'upernet-convnext-xlarge': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth',
}
_snake_case = model_name_to_url[model_name]
_snake_case = torch.hub.load_state_dict_from_url(UpperCamelCase__ , map_location='cpu' )['state_dict']
_snake_case = get_upernet_config(UpperCamelCase__ )
_snake_case = UperNetForSemanticSegmentation(UpperCamelCase__ )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
_snake_case = state_dict.pop(UpperCamelCase__ )
if "bn" in key:
_snake_case = key.replace('bn' , 'batch_norm' )
_snake_case = val
# rename keys
_snake_case = create_rename_keys(UpperCamelCase__ )
for src, dest in rename_keys:
rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
model.load_state_dict(UpperCamelCase__ )
# verify on image
_snake_case = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'
_snake_case = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw ).convert('RGB' )
_snake_case = SegformerImageProcessor()
_snake_case = processor(UpperCamelCase__ , return_tensors='pt' ).pixel_values
with torch.no_grad():
_snake_case = model(UpperCamelCase__ )
if model_name == "upernet-convnext-tiny":
_snake_case = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] )
elif model_name == "upernet-convnext-small":
_snake_case = torch.tensor(
[[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] )
elif model_name == "upernet-convnext-base":
_snake_case = torch.tensor(
[[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] )
elif model_name == "upernet-convnext-large":
_snake_case = torch.tensor(
[[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] )
elif model_name == "upernet-convnext-xlarge":
_snake_case = torch.tensor(
[[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] )
print('Logits:' , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , UpperCamelCase__ , atol=1e-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(UpperCamelCase__ )
print(F'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(UpperCamelCase__ )
if push_to_hub:
print(F'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(F'''openmmlab/{model_name}''' )
processor.push_to_hub(F'''openmmlab/{model_name}''' )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""upernet-convnext-tiny""",
type=str,
choices=[F"upernet-convnext-{size}" for size in ["""tiny""", """small""", """base""", """large""", """xlarge"""]],
help="""Name of the ConvNext UperNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
UpperCAmelCase_ = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 295
|
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class UpperCamelCase_ ( _lowerCamelCase , unittest.TestCase ):
lowerCAmelCase_ = BertTokenizer
lowerCAmelCase_ = BertTokenizerFast
lowerCAmelCase_ = True
lowerCAmelCase_ = True
lowerCAmelCase_ = filter_non_english
def lowerCAmelCase ( self ) -> Optional[int]:
super().setUp()
_snake_case = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
_snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Optional[Any]:
_snake_case = 'UNwant\u00E9d,running'
_snake_case = 'unwanted, running'
return input_text, output_text
def lowerCAmelCase ( self ) -> List[Any]:
_snake_case = self.tokenizer_class(self.vocab_file )
_snake_case = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(lowerCAmelCase_ , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [9, 6, 7, 12, 10, 11] )
def lowerCAmelCase ( self ) -> Tuple:
if not self.test_rust_tokenizer:
return
_snake_case = self.get_tokenizer()
_snake_case = self.get_rust_tokenizer()
_snake_case = 'UNwant\u00E9d,running'
_snake_case = tokenizer.tokenize(lowerCAmelCase_ )
_snake_case = rust_tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
_snake_case = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = self.get_rust_tokenizer()
_snake_case = tokenizer.encode(lowerCAmelCase_ )
_snake_case = rust_tokenizer.encode(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
# With lower casing
_snake_case = self.get_tokenizer(do_lower_case=lowerCAmelCase_ )
_snake_case = self.get_rust_tokenizer(do_lower_case=lowerCAmelCase_ )
_snake_case = 'UNwant\u00E9d,running'
_snake_case = tokenizer.tokenize(lowerCAmelCase_ )
_snake_case = rust_tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
_snake_case = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = self.get_rust_tokenizer()
_snake_case = tokenizer.encode(lowerCAmelCase_ )
_snake_case = rust_tokenizer.encode(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> List[str]:
_snake_case = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def lowerCAmelCase ( self ) -> Optional[Any]:
_snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def lowerCAmelCase ( self ) -> List[Any]:
_snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def lowerCAmelCase ( self ) -> Any:
_snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def lowerCAmelCase ( self ) -> List[str]:
_snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def lowerCAmelCase ( self ) -> Tuple:
_snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def lowerCAmelCase ( self ) -> Union[str, Any]:
_snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def lowerCAmelCase ( self ) -> Tuple:
_snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def lowerCAmelCase ( self ) -> Dict:
_snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def lowerCAmelCase ( self ) -> Union[str, Any]:
_snake_case = BasicTokenizer()
_snake_case = 'a\n\'ll !!to?\'d of, can\'t.'
_snake_case = ['a', '\'', 'll', '!', '!', 'to', '?', '\'', 'd', 'of', ',', 'can', '\'', 't', '.']
self.assertListEqual(tokenizer.tokenize(lowerCAmelCase_ ) , lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> Union[str, Any]:
_snake_case = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
_snake_case = {}
for i, token in enumerate(lowerCAmelCase_ ):
_snake_case = i
_snake_case = WordpieceTokenizer(vocab=lowerCAmelCase_ , unk_token='[UNK]' )
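        # WordPiece greedily consumes the longest matching vocab prefix; if any part of a
        # word cannot be matched, the whole word becomes the unk token.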
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
def lowerCAmelCase ( self ) -> Tuple:
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def lowerCAmelCase ( self ) -> Dict:
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def lowerCAmelCase ( self ) -> int:
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def lowerCAmelCase ( self ) -> Tuple:
_snake_case = self.get_tokenizer()
_snake_case = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(lowerCAmelCase_ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
self.assertListEqual(
[rust_tokenizer.tokenize(lowerCAmelCase_ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
@slow
def lowerCAmelCase ( self ) -> Optional[Any]:
_snake_case = self.tokenizer_class.from_pretrained('bert-base-uncased' )
_snake_case = tokenizer.encode('sequence builders' , add_special_tokens=lowerCAmelCase_ )
_snake_case = tokenizer.encode('multi-sequence build' , add_special_tokens=lowerCAmelCase_ )
_snake_case = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ )
_snake_case = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ , lowerCAmelCase_ )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def lowerCAmelCase ( self ) -> Union[str, Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_snake_case = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
_snake_case = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
_snake_case = tokenizer_r.encode_plus(
lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , )
_snake_case = tokenizer_r.do_lower_case if hasattr(lowerCAmelCase_ , 'do_lower_case' ) else False
_snake_case = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
def lowerCAmelCase ( self ) -> str:
_snake_case = ['的', '人', '有']
_snake_case = ''.join(lowerCAmelCase_ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_snake_case = True
_snake_case = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
_snake_case = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
_snake_case = tokenizer_p.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
_snake_case = tokenizer_r.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
_snake_case = tokenizer_r.convert_ids_to_tokens(lowerCAmelCase_ )
_snake_case = tokenizer_p.convert_ids_to_tokens(lowerCAmelCase_ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = False
_snake_case = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
_snake_case = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
_snake_case = tokenizer_r.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
_snake_case = tokenizer_p.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
_snake_case = tokenizer_r.convert_ids_to_tokens(lowerCAmelCase_ )
_snake_case = tokenizer_p.convert_ids_to_tokens(lowerCAmelCase_ )
# it is expected that only the first Chinese character is not preceded by "##".
_snake_case = [
F'''##{token}''' if idx != 0 else token for idx, token in enumerate(lowerCAmelCase_ )
]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
| 295
| 1
|
UpperCAmelCase_ = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
UpperCAmelCase_ = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
UpperCAmelCase_ = {
0: """Sunday""",
1: """Monday""",
2: """Tuesday""",
3: """Wednesday""",
4: """Thursday""",
5: """Friday""",
6: """Saturday""",
}
def lowerCamelCase__ ( UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int ) -> str:
'''simple docstring'''
assert len(str(UpperCamelCase__ ) ) > 2, "year should be in YYYY format"
assert 1 <= month <= 12, "month should be between 1 to 12"
assert 1 <= day <= 31, "day should be between 1 to 31"
# Doomsday algorithm:
_snake_case = year // 100
_snake_case = (5 * (century % 4) + 2) % 7
_snake_case = year % 100
_snake_case = centurian % 12
_snake_case = (
(centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
) % 7
_snake_case = (
DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
else DOOMSDAY_LEAP[month - 1]
)
_snake_case = (dooms_day + day - day_anchor) % 7
return WEEK_DAY_NAMES[week_day]
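# Worked example (hedged): 2000-01-01 fell on a Saturday, so the function above, called
# with year=2000, month=1, day=1, should return "Saturday".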
if __name__ == "__main__":
import doctest
doctest.testmod()
| 295
|
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
UpperCAmelCase_ = logging.get_logger(__name__)
class UpperCamelCase_ ( _lowerCamelCase ):
def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> None:
warnings.warn(
'The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use FlavaImageProcessor instead.' , lowerCAmelCase_ , )
super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ )
| 295
| 1
|
import tensorflow as tf
from ...tf_utils import shape_list
class UpperCamelCase_ ( tf.keras.layers.Layer ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=1 , lowerCAmelCase_=False , **lowerCAmelCase_ ) -> Dict:
super().__init__(**lowerCAmelCase_ )
_snake_case = vocab_size
_snake_case = d_embed
_snake_case = d_proj
_snake_case = cutoffs + [vocab_size]
_snake_case = [0] + self.cutoffs
_snake_case = div_val
_snake_case = self.cutoffs[0]
_snake_case = len(self.cutoffs ) - 1
_snake_case = self.shortlist_size + self.n_clusters
_snake_case = keep_order
_snake_case = []
_snake_case = []
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Union[str, Any]:
if self.n_clusters > 0:
_snake_case = self.add_weight(
shape=(self.n_clusters, self.d_embed) , initializer='zeros' , trainable=lowerCAmelCase_ , name='cluster_weight' )
_snake_case = self.add_weight(
shape=(self.n_clusters,) , initializer='zeros' , trainable=lowerCAmelCase_ , name='cluster_bias' )
if self.div_val == 1:
for i in range(len(self.cutoffs ) ):
if self.d_proj != self.d_embed:
_snake_case = self.add_weight(
shape=(self.d_embed, self.d_proj) , initializer='zeros' , trainable=lowerCAmelCase_ , name=F'''out_projs_._{i}''' , )
self.out_projs.append(lowerCAmelCase_ )
else:
self.out_projs.append(lowerCAmelCase_ )
_snake_case = self.add_weight(
shape=(self.vocab_size, self.d_embed) , initializer='zeros' , trainable=lowerCAmelCase_ , name=F'''out_layers_._{i}_._weight''' , )
_snake_case = self.add_weight(
shape=(self.vocab_size,) , initializer='zeros' , trainable=lowerCAmelCase_ , name=F'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias) )
else:
for i in range(len(self.cutoffs ) ):
_snake_case , _snake_case = self.cutoff_ends[i], self.cutoff_ends[i + 1]
_snake_case = self.d_embed // (self.div_val**i)
_snake_case = self.add_weight(
shape=(d_emb_i, self.d_proj) , initializer='zeros' , trainable=lowerCAmelCase_ , name=F'''out_projs_._{i}''' )
self.out_projs.append(lowerCAmelCase_ )
_snake_case = self.add_weight(
shape=(r_idx - l_idx, d_emb_i) , initializer='zeros' , trainable=lowerCAmelCase_ , name=F'''out_layers_._{i}_._weight''' , )
_snake_case = self.add_weight(
shape=(r_idx - l_idx,) , initializer='zeros' , trainable=lowerCAmelCase_ , name=F'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias) )
super().build(lowerCAmelCase_ )
@staticmethod
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None ) -> List[Any]:
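        # Optionally map hidden states into the embedding space via the projection
        # ('ibd,ed->ibe'), then score them against the output embedding matrix
        # ('ibd,nd->ibn') and add the bias.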
_snake_case = x
if proj is not None:
_snake_case = tf.einsum('ibd,ed->ibe' , lowerCAmelCase_ , lowerCAmelCase_ )
return tf.einsum('ibd,nd->ibn' , lowerCAmelCase_ , lowerCAmelCase_ ) + b
@staticmethod
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[Any]:
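        # For each row, pick the log-probability of its target token: pair row indices
        # with the targets and gather_nd from the (rows, vocab) log-prob matrix.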
_snake_case = shape_list(lowerCAmelCase_ )
_snake_case = tf.range(lp_size[0] , dtype=target.dtype )
_snake_case = tf.stack([r, target] , 1 )
return tf.gather_nd(lowerCAmelCase_ , lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=True , lowerCAmelCase_=False ) -> int:
_snake_case = 0
if self.n_clusters == 0:
_snake_case = self._logit(lowerCAmelCase_ , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
if target is not None:
_snake_case = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=lowerCAmelCase_ , logits=lowerCAmelCase_ )
_snake_case = tf.nn.log_softmax(lowerCAmelCase_ , axis=-1 )
else:
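            # Adaptive softmax: the head covers frequent tokens plus one pseudo-token per
            # tail cluster; each tail's log-probs are offset by its cluster log-prob below.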
_snake_case = shape_list(lowerCAmelCase_ )
_snake_case = []
_snake_case = tf.zeros(hidden_sizes[:2] )
for i in range(len(self.cutoffs ) ):
_snake_case , _snake_case = self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
_snake_case = (target >= l_idx) & (target < r_idx)
_snake_case = tf.where(lowerCAmelCase_ )
_snake_case = tf.boolean_mask(lowerCAmelCase_ , lowerCAmelCase_ ) - l_idx
if self.div_val == 1:
_snake_case = self.out_layers[0][0][l_idx:r_idx]
_snake_case = self.out_layers[0][1][l_idx:r_idx]
else:
_snake_case = self.out_layers[i][0]
_snake_case = self.out_layers[i][1]
if i == 0:
_snake_case = tf.concat([cur_W, self.cluster_weight] , 0 )
_snake_case = tf.concat([cur_b, self.cluster_bias] , 0 )
_snake_case = self._logit(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , self.out_projs[0] )
_snake_case = tf.nn.log_softmax(lowerCAmelCase_ )
out.append(head_logprob[..., : self.cutoffs[0]] )
if target is not None:
_snake_case = tf.boolean_mask(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = self._gather_logprob(lowerCAmelCase_ , lowerCAmelCase_ )
else:
_snake_case = self._logit(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , self.out_projs[i] )
_snake_case = tf.nn.log_softmax(lowerCAmelCase_ )
_snake_case = self.cutoffs[0] + i - 1 # No probability for the head cluster
_snake_case = head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(lowerCAmelCase_ )
if target is not None:
_snake_case = tf.boolean_mask(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = tf.boolean_mask(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = self._gather_logprob(lowerCAmelCase_ , lowerCAmelCase_ )
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(lowerCAmelCase_ , -cur_logprob , shape_list(lowerCAmelCase_ ) )
_snake_case = tf.concat(lowerCAmelCase_ , axis=-1 )
if target is not None:
if return_mean:
_snake_case = tf.reduce_mean(lowerCAmelCase_ )
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(lowerCAmelCase_ )
            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference).
self.add_metric(lowerCAmelCase_ , name=self.name , aggregation='mean' if return_mean else '' )
return out
| 295
|
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
UpperCAmelCase_ = [
{"""dataset""": """wikipedia""", """config_name""": """20220301.de"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.en"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.fr"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.frr"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.it"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.simple"""},
{"""dataset""": """snli""", """config_name""": """plain_text"""},
{"""dataset""": """eli5""", """config_name""": """LFQA_reddit"""},
{"""dataset""": """wiki40b""", """config_name""": """en"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.compressed"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.no_index"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.multiset.no_index"""},
{"""dataset""": """natural_questions""", """config_name""": """default"""},
]
def lowerCamelCase__ ( UpperCamelCase__ : Dict=True ) -> Dict:
'''simple docstring'''
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=_lowerCamelCase ) )
class UpperCamelCase_ ( _lowerCamelCase ):
lowerCAmelCase_ = None
lowerCAmelCase_ = None
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Union[str, Any]:
with TemporaryDirectory() as tmp_dir:
_snake_case = dataset_module_factory(lowerCAmelCase_ , cache_dir=lowerCAmelCase_ )
_snake_case = import_main_class(dataset_module.module_path , dataset=lowerCAmelCase_ )
_snake_case = builder_cls(
cache_dir=lowerCAmelCase_ , config_name=lowerCAmelCase_ , hash=dataset_module.hash , )
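            # Build the URL of the dataset info file that should already exist for this
            # config on the HF GCP bucket.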
_snake_case = '/'.join(
[
HF_GCP_BASE_URL,
builder_instance._relative_data_dir(with_hash=lowerCAmelCase_ ).replace(os.sep , '/' ),
config.DATASET_INFO_FILENAME,
] )
_snake_case = cached_path(lowerCAmelCase_ , cache_dir=lowerCAmelCase_ )
self.assertTrue(os.path.exists(lowerCAmelCase_ ) )
@pytest.mark.integration
def lowerCamelCase__ ( UpperCamelCase__ : Any ) -> Tuple:
'''simple docstring'''
_snake_case = tmp_path_factory.mktemp('test_hf_gcp' ) / 'test_wikipedia_simple'
_snake_case = dataset_module_factory('wikipedia' , cache_dir=UpperCamelCase__ )
_snake_case = import_main_class(dataset_module.module_path )
_snake_case = builder_cls(
cache_dir=UpperCamelCase__ , config_name='20220301.frr' , hash=dataset_module.hash , )
# use the HF cloud storage, not the original download_and_prepare that uses apache-beam
_snake_case = None
builder_instance.download_and_prepare()
_snake_case = builder_instance.as_dataset()
assert ds
@pytest.mark.integration
def lowerCamelCase__ ( UpperCamelCase__ : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
_snake_case = dataset_module_factory('wikipedia' , cache_dir=UpperCamelCase__ )
_snake_case = import_main_class(dataset_module.module_path , dataset=UpperCamelCase__ )
_snake_case = builder_cls(
cache_dir=UpperCamelCase__ , config_name='20220301.frr' , hash=dataset_module.hash , )
_snake_case = builder_instance.as_streaming_dataset()
assert ds
assert isinstance(UpperCamelCase__ , UpperCamelCase__ )
assert "train" in ds
assert isinstance(ds['train'] , UpperCamelCase__ )
assert next(iter(ds['train'] ) )
| 295
| 1
|
import os
import pytest
from transformers.dynamic_module_utils import get_imports
UpperCAmelCase_ = """
import os
"""
UpperCAmelCase_ = """
def foo():
import os
return False
"""
UpperCAmelCase_ = """
def foo():
def bar():
if True:
import os
return False
return bar()
"""
UpperCAmelCase_ = """
import os
try:
import bar
except ImportError:
raise ValueError()
"""
UpperCAmelCase_ = """
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
"""
UpperCAmelCase_ = """
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
"""
UpperCAmelCase_ = """
import os
try:
import bar
except ImportError as e:
raise ValueError()
"""
UpperCAmelCase_ = """
import os
try:
import bar
except:
raise ValueError()
"""
UpperCAmelCase_ = """
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
"""
UpperCAmelCase_ = """
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
"""
UpperCAmelCase_ = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize('case' , UpperCamelCase__ )
def lowerCamelCase__ ( UpperCamelCase__ : str , UpperCamelCase__ : Any ) -> Union[str, Any]:
'''simple docstring'''
_snake_case = os.path.join(UpperCamelCase__ , 'test_file.py' )
with open(UpperCamelCase__ , 'w' ) as _tmp_file:
_tmp_file.write(UpperCamelCase__ )
_snake_case = get_imports(UpperCamelCase__ )
assert parsed_imports == ["os"]
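    # Every case above reduces to ["os"]: get_imports also walks function bodies, but
    # skips imports guarded by try/except, treating them as optional dependencies.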
| 295
|
def lowerCamelCase__ ( ) -> int:
'''simple docstring'''
return 1
def lowerCamelCase__ ( UpperCamelCase__ : int ) -> int:
'''simple docstring'''
return 0 if x < 0 else two_pence(x - 2 ) + one_pence()
def lowerCamelCase__ ( UpperCamelCase__ : int ) -> int:
'''simple docstring'''
return 0 if x < 0 else five_pence(x - 5 ) + two_pence(UpperCamelCase__ )
def lowerCamelCase__ ( UpperCamelCase__ : int ) -> int:
'''simple docstring'''
return 0 if x < 0 else ten_pence(x - 10 ) + five_pence(UpperCamelCase__ )
def lowerCamelCase__ ( UpperCamelCase__ : int ) -> int:
'''simple docstring'''
return 0 if x < 0 else twenty_pence(x - 20 ) + ten_pence(UpperCamelCase__ )
def lowerCamelCase__ ( UpperCamelCase__ : int ) -> int:
'''simple docstring'''
return 0 if x < 0 else fifty_pence(x - 50 ) + twenty_pence(UpperCamelCase__ )
def lowerCamelCase__ ( UpperCamelCase__ : int ) -> int:
'''simple docstring'''
return 0 if x < 0 else one_pound(x - 100 ) + fifty_pence(UpperCamelCase__ )
def lowerCamelCase__ ( UpperCamelCase__ : int ) -> int:
'''simple docstring'''
return 0 if x < 0 else two_pound(x - 200 ) + one_pound(UpperCamelCase__ )
def lowerCamelCase__ ( UpperCamelCase__ : int = 200 ) -> int:
'''simple docstring'''
return two_pound(UpperCamelCase__ )
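# Hedged check: this is Project Euler #31; with the eight UK coin denominations modelled
# above, the number of ways to make 200 pence is expected to be 73682.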
if __name__ == "__main__":
print(solution(int(input().strip())))
| 295
| 1
|
from cva import destroyAllWindows, imread, imshow, waitKey
def lowerCamelCase__ ( UpperCamelCase__ : Dict ) -> Optional[Any]:
'''simple docstring'''
_snake_case , _snake_case = img.shape[0], img.shape[1]
# converting each pixel's color to its negative
for i in range(UpperCamelCase__ ):
for j in range(UpperCamelCase__ ):
_snake_case = [255, 255, 255] - img[i][j]
return img
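# A vectorized alternative (a sketch, assuming a uint8 NumPy image): `255 - img`
# computes the same negative without the per-pixel Python loops above.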
if __name__ == "__main__":
# read original image
UpperCAmelCase_ = imread("""image_data/lena.jpg""", 1)
# convert to its negative
UpperCAmelCase_ = convert_to_negative(img)
# show result image
imshow("""negative of original image""", img)
waitKey(0)
destroyAllWindows()
| 295
|
def lowerCamelCase__ ( UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[int] ) -> Tuple:
'''simple docstring'''
_snake_case = [0 for i in range(r + 1 )]
# nc0 = 1
_snake_case = 1
for i in range(1 , n + 1 ):
# to compute current row from previous row.
_snake_case = min(UpperCamelCase__ , UpperCamelCase__ )
while j > 0:
c[j] += c[j - 1]
j -= 1
return c[r]
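# Worked check: for n=10, r=5 the row-by-row Pascal update above yields C(10, 5) = 252.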
print(binomial_coefficient(n=10, r=5))
| 295
| 1
|
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
UpperCAmelCase_ = pytest.mark.integration
UpperCAmelCase_ = {"""comet"""}
UpperCAmelCase_ = importlib.util.find_spec("""fairseq""") is not None
UpperCAmelCase_ = {"""code_eval"""}
UpperCAmelCase_ = os.name == """nt"""
UpperCAmelCase_ = {"""bertscore""", """frugalscore""", """perplexity"""}
UpperCAmelCase_ = importlib.util.find_spec("""transformers""") is not None
def lowerCamelCase__ ( UpperCamelCase__ : str ) -> str:
'''simple docstring'''
@wraps(UpperCamelCase__ )
def wrapper(self : Optional[Any] , UpperCamelCase__ : Optional[Any] ):
if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('test requires Fairseq' )
else:
test_case(self , UpperCamelCase__ )
return wrapper
def lowerCamelCase__ ( UpperCamelCase__ : Dict ) -> Any:
'''simple docstring'''
@wraps(UpperCamelCase__ )
def wrapper(self : Tuple , UpperCamelCase__ : List[Any] ):
if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('test requires transformers' )
else:
test_case(self , UpperCamelCase__ )
return wrapper
def lowerCamelCase__ ( UpperCamelCase__ : Optional[Any] ) -> List[str]:
'''simple docstring'''
@wraps(UpperCamelCase__ )
def wrapper(self : str , UpperCamelCase__ : Dict ):
if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('test not supported on Windows' )
else:
test_case(self , UpperCamelCase__ )
return wrapper
def lowerCamelCase__ ( ) -> Optional[int]:
'''simple docstring'''
_snake_case = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob('./metrics/*/' )]
return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
@local
class UpperCamelCase_ ( parameterized.TestCase ):
lowerCAmelCase_ = {}
lowerCAmelCase_ = None
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:load_metric is deprecated:FutureWarning' )
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> str:
_snake_case = '[...]'
_snake_case = importlib.import_module(
datasets.load.metric_module_factory(os.path.join('metrics' , lowerCAmelCase_ ) ).module_path )
_snake_case = datasets.load.import_main_class(metric_module.__name__ , dataset=lowerCAmelCase_ )
# check parameters
_snake_case = inspect.signature(metric._compute ).parameters
self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs
# run doctest
with self.patch_intensive_calls(lowerCAmelCase_ , metric_module.__name__ ):
with self.use_local_metrics():
try:
_snake_case = doctest.testmod(lowerCAmelCase_ , verbose=lowerCAmelCase_ , raise_on_error=lowerCAmelCase_ )
except doctest.UnexpectedException as e:
raise e.exc_info[1] # raise the exception that doctest caught
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@slow
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Optional[int]:
_snake_case = '[...]'
_snake_case = importlib.import_module(
datasets.load.metric_module_factory(os.path.join('metrics' , lowerCAmelCase_ ) ).module_path )
# run doctest
with self.use_local_metrics():
_snake_case = doctest.testmod(lowerCAmelCase_ , verbose=lowerCAmelCase_ , raise_on_error=lowerCAmelCase_ )
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@contextmanager
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Union[str, Any]:
if metric_name in self.INTENSIVE_CALLS_PATCHER:
with self.INTENSIVE_CALLS_PATCHER[metric_name](lowerCAmelCase_ ):
yield
else:
yield
@contextmanager
def lowerCAmelCase ( self ) -> Tuple:
def load_local_metric(lowerCAmelCase_ , *lowerCAmelCase_ , **lowerCAmelCase_ ):
return load_metric(os.path.join('metrics' , lowerCAmelCase_ ) , *lowerCAmelCase_ , **lowerCAmelCase_ )
with patch('datasets.load_metric' ) as mock_load_metric:
_snake_case = load_local_metric
yield
@classmethod
def lowerCAmelCase ( cls , lowerCAmelCase_ ) -> Optional[int]:
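        # Registers `patcher` (wrapped as a context manager) under the given metric name,
        # so that metric's expensive model calls can be mocked during the doctest runs.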
def wrapper(lowerCAmelCase_ ):
_snake_case = contextmanager(lowerCAmelCase_ )
_snake_case = patcher
return patcher
return wrapper
@LocalMetricTest.register_intensive_calls_patcher('bleurt' )
def lowerCamelCase__ ( UpperCamelCase__ : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
    import tensorflow.compat.v1 as tf
from bleurt.score import Predictor
tf.flags.DEFINE_string('sv' , '' , '' ) # handle pytest cli flags
class UpperCamelCase_ ( _lowerCamelCase ):
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> List[Any]:
assert len(input_dict['input_ids'] ) == 2
return np.array([1.03, 1.04] )
# mock predict_fn which is supposed to do a forward pass with a bleurt model
with patch('bleurt.score._create_predictor' ) as mock_create_predictor:
_snake_case = MockedPredictor()
yield
@LocalMetricTest.register_intensive_calls_patcher('bertscore' )
def lowerCamelCase__ ( UpperCamelCase__ : Optional[Any] ) -> Any:
'''simple docstring'''
import torch
def bert_cos_score_idf(UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str , *UpperCamelCase__ : Dict , **UpperCamelCase__ : str ):
return torch.tensor([[1.0, 1.0, 1.0]] * len(UpperCamelCase__ ) )
    # mock get_model, which would otherwise download a bert model
    # mock bert_cos_score_idf, which would otherwise do a forward pass with a bert model
with patch('bert_score.scorer.get_model' ), patch(
'bert_score.scorer.bert_cos_score_idf' ) as mock_bert_cos_score_idf:
_snake_case = bert_cos_score_idf
yield
@LocalMetricTest.register_intensive_calls_patcher('comet' )
def lowerCamelCase__ ( UpperCamelCase__ : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
def load_from_checkpoint(UpperCamelCase__ : int ):
class UpperCamelCase_ :
def lowerCAmelCase ( self , lowerCAmelCase_ , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> Tuple:
assert len(lowerCAmelCase_ ) == 2
_snake_case = [0.19, 0.92]
return scores, sum(lowerCAmelCase_ ) / len(lowerCAmelCase_ )
return Model()
    # mock download_model, which is supposed to download a comet model
    # mock load_from_checkpoint, which is supposed to load a comet model
with patch('comet.download_model' ) as mock_download_model:
_snake_case = None
with patch('comet.load_from_checkpoint' ) as mock_load_from_checkpoint:
_snake_case = load_from_checkpoint
yield
def lowerCamelCase__ ( ) -> Optional[Any]:
'''simple docstring'''
_snake_case = load_metric(os.path.join('metrics' , 'seqeval' ) )
_snake_case = 'ERROR'
_snake_case = F'''Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}'''
with pytest.raises(UpperCamelCase__ , match=re.escape(UpperCamelCase__ ) ):
metric.compute(predictions=[] , references=[] , scheme=UpperCamelCase__ )
| 295
|
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class UpperCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
@register_to_config
def __init__( self , lowerCAmelCase_ = 128 , lowerCAmelCase_ = 256 , lowerCAmelCase_ = 20_00.0 , lowerCAmelCase_ = 768 , lowerCAmelCase_ = 12 , lowerCAmelCase_ = 12 , lowerCAmelCase_ = 64 , lowerCAmelCase_ = 2048 , lowerCAmelCase_ = 0.1 , ) -> Union[str, Any]:
super().__init__()
_snake_case = nn.Sequential(
nn.Linear(lowerCAmelCase_ , d_model * 4 , bias=lowerCAmelCase_ ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=lowerCAmelCase_ ) , nn.SiLU() , )
_snake_case = nn.Embedding(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = False
_snake_case = nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ )
_snake_case = nn.Dropout(p=lowerCAmelCase_ )
_snake_case = nn.ModuleList()
for lyr_num in range(lowerCAmelCase_ ):
# FiLM conditional T5 decoder
_snake_case = DecoderLayer(d_model=lowerCAmelCase_ , d_kv=lowerCAmelCase_ , num_heads=lowerCAmelCase_ , d_ff=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ )
self.decoders.append(lowerCAmelCase_ )
_snake_case = TaLayerNorm(lowerCAmelCase_ )
_snake_case = nn.Dropout(p=lowerCAmelCase_ )
_snake_case = nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Tuple:
_snake_case = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[Any]:
_snake_case , _snake_case , _snake_case = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
_snake_case = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
_snake_case = self.conditioning_emb(lowerCAmelCase_ ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
_snake_case = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
_snake_case = torch.broadcast_to(
torch.arange(lowerCAmelCase_ , device=decoder_input_tokens.device ) , (batch, seq_length) , )
_snake_case = self.position_encoding(lowerCAmelCase_ )
_snake_case = self.continuous_inputs_projection(lowerCAmelCase_ )
inputs += position_encodings
_snake_case = self.dropout(lowerCAmelCase_ )
# decoder: No padding present.
_snake_case = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
_snake_case = [(x, self.encoder_decoder_mask(lowerCAmelCase_ , lowerCAmelCase_ )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
_snake_case = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
_snake_case = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
_snake_case = lyr(
lowerCAmelCase_ , conditioning_emb=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , encoder_attention_mask=lowerCAmelCase_ , )[0]
_snake_case = self.decoder_norm(lowerCAmelCase_ )
_snake_case = self.post_dropout(lowerCAmelCase_ )
_snake_case = self.spec_out(lowerCAmelCase_ )
return spec_out
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=1E-6 ) -> Tuple:
super().__init__()
_snake_case = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=lowerCAmelCase_ , d_kv=lowerCAmelCase_ , num_heads=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=lowerCAmelCase_ , d_kv=lowerCAmelCase_ , num_heads=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ , layer_norm_epsilon=lowerCAmelCase_ , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=lowerCAmelCase_ , d_ff=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ , layer_norm_epsilon=lowerCAmelCase_ ) )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , ) -> Tuple:
_snake_case = self.layer[0](
lowerCAmelCase_ , conditioning_emb=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , )
if encoder_hidden_states is not None:
_snake_case = torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to(
encoder_hidden_states.dtype )
_snake_case = self.layer[1](
lowerCAmelCase_ , key_value_states=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , )
# Apply Film Conditional Feed Forward layer
_snake_case = self.layer[-1](lowerCAmelCase_ , lowerCAmelCase_ )
return (hidden_states,)
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Tuple:
super().__init__()
_snake_case = TaLayerNorm(lowerCAmelCase_ )
_snake_case = TaFiLMLayer(in_features=d_model * 4 , out_features=lowerCAmelCase_ )
_snake_case = Attention(query_dim=lowerCAmelCase_ , heads=lowerCAmelCase_ , dim_head=lowerCAmelCase_ , out_bias=lowerCAmelCase_ , scale_qk=lowerCAmelCase_ )
_snake_case = nn.Dropout(lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , ) -> str:
# pre_self_attention_layer_norm
_snake_case = self.layer_norm(lowerCAmelCase_ )
if conditioning_emb is not None:
_snake_case = self.FiLMLayer(lowerCAmelCase_ , lowerCAmelCase_ )
# Self-attention block
_snake_case = self.attention(lowerCAmelCase_ )
_snake_case = hidden_states + self.dropout(lowerCAmelCase_ )
return hidden_states
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> str:
super().__init__()
_snake_case = Attention(query_dim=lowerCAmelCase_ , heads=lowerCAmelCase_ , dim_head=lowerCAmelCase_ , out_bias=lowerCAmelCase_ , scale_qk=lowerCAmelCase_ )
_snake_case = TaLayerNorm(lowerCAmelCase_ , eps=lowerCAmelCase_ )
_snake_case = nn.Dropout(lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , ) -> Dict:
_snake_case = self.layer_norm(lowerCAmelCase_ )
_snake_case = self.attention(
lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , attention_mask=attention_mask.squeeze(1 ) , )
_snake_case = hidden_states + self.dropout(lowerCAmelCase_ )
return layer_output
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[str]:
super().__init__()
_snake_case = TaDenseGatedActDense(d_model=lowerCAmelCase_ , d_ff=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ )
_snake_case = TaFiLMLayer(in_features=d_model * 4 , out_features=lowerCAmelCase_ )
_snake_case = TaLayerNorm(lowerCAmelCase_ , eps=lowerCAmelCase_ )
_snake_case = nn.Dropout(lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=None ) -> Union[str, Any]:
_snake_case = self.layer_norm(lowerCAmelCase_ )
if conditioning_emb is not None:
_snake_case = self.film(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = self.DenseReluDense(lowerCAmelCase_ )
_snake_case = hidden_states + self.dropout(lowerCAmelCase_ )
return hidden_states
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Union[str, Any]:
super().__init__()
_snake_case = nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ )
_snake_case = nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ )
_snake_case = nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ )
_snake_case = nn.Dropout(lowerCAmelCase_ )
_snake_case = NewGELUActivation()
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Any:
_snake_case = self.act(self.wi_a(lowerCAmelCase_ ) )
_snake_case = self.wi_a(lowerCAmelCase_ )
_snake_case = hidden_gelu * hidden_linear
_snake_case = self.dropout(lowerCAmelCase_ )
_snake_case = self.wo(lowerCAmelCase_ )
return hidden_states
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=1E-6 ) -> str:
super().__init__()
_snake_case = nn.Parameter(torch.ones(lowerCAmelCase_ ) )
_snake_case = eps
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> int:
# T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
# Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
# w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
# half-precision inputs is done in fp32
_snake_case = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=lowerCAmelCase_ )
_snake_case = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
_snake_case = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
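# A minimal standalone sketch of the RMSNorm computation above, with readable
# names (illustrative only, not part of this module); it mirrors the fp32
# accumulation and the cast back to the parameter dtype:
#
#   def rms_norm(hidden_states, weight, eps=1e-6):
#       variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
#       hidden_states = hidden_states * torch.rsqrt(variance + eps)
#       if weight.dtype in (torch.float16, torch.bfloat16):
#           hidden_states = hidden_states.to(weight.dtype)
#       return weight * hidden_states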
class UpperCamelCase_ ( nn.Module ):
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> torch.Tensor:
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.04_47_15 * torch.pow(lowerCAmelCase_ , 3.0 )) ))
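# The forward above is the tanh approximation of GELU:
#   0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * x**3)))
# In recent PyTorch (assuming >= 1.12) the same curve is available directly as
# torch.nn.functional.gelu(x, approximate="tanh").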
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Any:
super().__init__()
_snake_case = nn.Linear(lowerCAmelCase_ , out_features * 2 , bias=lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[Any]:
_snake_case = self.scale_bias(lowerCAmelCase_ )
_snake_case , _snake_case = torch.chunk(lowerCAmelCase_ , 2 , -1 )
_snake_case = x * (1 + scale) + shift
return x
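# FiLM (feature-wise linear modulation) in a nutshell: the conditioning
# embedding is projected to twice the feature width, chunked into a scale and
# a shift, and applied as x * (1 + scale) + shift. A tiny worked example with
# illustrative values: x = [1.0, 2.0], scale = [0.5, 0.0], shift = [0.0, -1.0]
# gives [1.0 * 1.5 + 0.0, 2.0 * 1.0 - 1.0] = [1.5, 1.0].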
| 295
| 1
|
from __future__ import annotations
UpperCAmelCase_ = [True] * 1000001
UpperCAmelCase_ = 2
while i * i <= 1000000:
    if sieve[i]:
for j in range(i * i, 1000001, i):
UpperCAmelCase_ = False
i += 1
def lowerCamelCase__ ( UpperCamelCase__ : int ) -> bool:
'''simple docstring'''
    return sieve[n]
def lowerCamelCase__ ( UpperCamelCase__ : int ) -> bool:
'''simple docstring'''
return any(digit in '02468' for digit in str(UpperCamelCase__ ) )
def lowerCamelCase__ ( UpperCamelCase__ : int = 1_000_000 ) -> list[int]:
'''simple docstring'''
_snake_case = [2] # result already includes the number 2.
for num in range(3 , limit + 1 , 2 ):
if is_prime(UpperCamelCase__ ) and not contains_an_even_digit(UpperCamelCase__ ):
_snake_case = str(UpperCamelCase__ )
_snake_case = [int(str_num[j:] + str_num[:j] ) for j in range(len(UpperCamelCase__ ) )]
if all(is_prime(UpperCamelCase__ ) for i in list_nums ):
result.append(UpperCamelCase__ )
return result
def lowerCamelCase__ ( ) -> int:
'''simple docstring'''
return len(find_circular_primes() )
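# A worked example of the circular-prime check above: 197 has the rotations
# 197, 971 and 719, all prime, so 197 is kept; 23 is rejected early because it
# contains the even digit 2 (some rotation of any multi-digit number with an
# even digit is itself even). With the default limit of 1_000_000 the count is
# the well-known Project Euler 35 answer, 55.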
if __name__ == "__main__":
print(F"{len(find_circular_primes()) = }")
| 295
|
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"""EleutherAI/gpt-neo-1.3B""": """https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json""",
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class UpperCamelCase_ ( _lowerCamelCase ):
lowerCAmelCase_ = '''gpt_neo'''
lowerCAmelCase_ = ['''past_key_values''']
lowerCAmelCase_ = {'''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
def __init__( self , lowerCAmelCase_=5_0257 , lowerCAmelCase_=2048 , lowerCAmelCase_=2048 , lowerCAmelCase_=24 , lowerCAmelCase_=[[["global", "local"], 12]] , lowerCAmelCase_=16 , lowerCAmelCase_=None , lowerCAmelCase_=256 , lowerCAmelCase_="gelu_new" , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.1 , lowerCAmelCase_=1E-5 , lowerCAmelCase_=0.02 , lowerCAmelCase_=True , lowerCAmelCase_=5_0256 , lowerCAmelCase_=5_0256 , **lowerCAmelCase_ , ) -> Tuple:
_snake_case = vocab_size
_snake_case = max_position_embeddings
_snake_case = hidden_size
_snake_case = num_layers
_snake_case = num_heads
_snake_case = intermediate_size
_snake_case = window_size
_snake_case = activation_function
_snake_case = resid_dropout
_snake_case = embed_dropout
_snake_case = attention_dropout
_snake_case = classifier_dropout
_snake_case = layer_norm_epsilon
_snake_case = initializer_range
_snake_case = use_cache
_snake_case = bos_token_id
_snake_case = eos_token_id
_snake_case = attention_types
_snake_case = self.expand_attention_types_params(lowerCAmelCase_ )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.attention_layers)` == `config.num_layers` '
F'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, '''
F'''`config.num_layers = {self.num_layers}`. '''
'`config.attention_layers` is prepared using `config.attention_types`. '
'Please verify the value of `config.attention_types` argument.' )
super().__init__(bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , **lowerCAmelCase_ )
@staticmethod
def lowerCAmelCase ( lowerCAmelCase_ ) -> Any:
_snake_case = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
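    # A quick worked example of the expansion above: the default
    # attention_types value [[["global", "local"], 12]] extends the pair
    # ["global", "local"] 12 times, producing a 24-entry list that alternates
    # global and local attention across the model's 24 layers.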
def lowerCamelCase__ ( UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[int] ) -> Any:
'''simple docstring'''
import torch
_snake_case = input.size()
_snake_case = len(UpperCamelCase__ )
_snake_case = shape[dimension]
_snake_case = torch.arange(0 , UpperCamelCase__ , UpperCamelCase__ )
_snake_case = torch.div(sizedim - size , UpperCamelCase__ , rounding_mode='floor' ) + 1
_snake_case = torch.arange(UpperCamelCase__ ) + low_indices[:min_length][:, None]
_snake_case = [slice(UpperCamelCase__ )] * rank
_snake_case = indices
_snake_case = input[s]
_snake_case = list(range(0 , rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(UpperCamelCase__ )
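# A worked example of the sliding-window helper above (it mimics
# torch.Tensor.unfold): for input = torch.arange(8), dimension = 0, size = 4,
# step = 2, the three window starts are [0, 2, 4] and the result is
#   [[0, 1, 2, 3], [2, 3, 4, 5], [4, 5, 6, 7]]  # shape (3, 4)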
def lowerCamelCase__ ( UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict ) -> str:
'''simple docstring'''
import torch
_snake_case = torch.arange(1 , UpperCamelCase__ )
_snake_case = torch.remainder(UpperCamelCase__ , UpperCamelCase__ )
_snake_case = remainders == 0
_snake_case = candidates[divisor_indices]
_snake_case = torch.max(UpperCamelCase__ )
return largest_divisor, torch.div(UpperCamelCase__ , UpperCamelCase__ , rounding_mode='floor' )
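# A quick worked example of the helper above: for dimension = 12 the candidate
# divisors in [1, 12) are [1, 2, 3, 4, 6], so it returns (6, 2) -- the largest
# proper divisor and its cofactor. In context this appears to pick a block
# size that divides a sequence dimension evenly (an inference from the code,
# not a documented contract).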
class UpperCamelCase_ ( _lowerCamelCase ):
@property
def lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
_snake_case = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase_ , direction='inputs' )
_snake_case = {0: 'batch', 1: 'past_sequence + sequence'}
else:
_snake_case = {0: 'batch', 1: 'sequence'}
return common_inputs
@property
def lowerCAmelCase ( self ) -> int:
return self._config.num_heads
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = -1 , lowerCAmelCase_ = -1 , lowerCAmelCase_ = False , lowerCAmelCase_ = None , ) -> Mapping[str, Any]:
_snake_case = super(lowerCAmelCase_ , self ).generate_dummy_inputs(
lowerCAmelCase_ , batch_size=lowerCAmelCase_ , seq_length=lowerCAmelCase_ , is_pair=lowerCAmelCase_ , framework=lowerCAmelCase_ )
        # We need to order the inputs in the way they appear in the forward()
_snake_case = OrderedDict({'input_ids': common_inputs['input_ids']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
_snake_case , _snake_case = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
_snake_case = seqlen + 2
_snake_case = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
_snake_case = [
(torch.zeros(lowerCAmelCase_ ), torch.zeros(lowerCAmelCase_ )) for _ in range(self.num_layers )
]
_snake_case = common_inputs['attention_mask']
if self.use_past:
_snake_case = ordered_inputs['attention_mask'].dtype
_snake_case = torch.cat(
[ordered_inputs['attention_mask'], torch.ones(lowerCAmelCase_ , lowerCAmelCase_ , dtype=lowerCAmelCase_ )] , dim=1 )
return ordered_inputs
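    # A shape sketch for the dummy past_key_values built above (values are
    # illustrative): with batch_size = 2, seq_length = 8, num_heads = 16 and
    # hidden_size = 2048, each layer receives a (key, value) pair of zero
    # tensors of shape (2, 16, 10, 128) -- the past length is seqlen + 2 --
    # and the attention mask is extended by 10 ones along dim 1.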
@property
def lowerCAmelCase ( self ) -> int:
return 13
| 295
| 1
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 295
|
from cva import destroyAllWindows, imread, imshow, waitKey
def lowerCamelCase__ ( UpperCamelCase__ : Dict ) -> Optional[Any]:
'''simple docstring'''
_snake_case , _snake_case = img.shape[0], img.shape[1]
# converting each pixel's color to its negative
for i in range(UpperCamelCase__ ):
for j in range(UpperCamelCase__ ):
_snake_case = [255, 255, 255] - img[i][j]
return img
if __name__ == "__main__":
# read original image
UpperCAmelCase_ = imread("""image_data/lena.jpg""", 1)
# convert to its negative
UpperCAmelCase_ = convert_to_negative(img)
# show result image
imshow("""negative of original image""", img)
waitKey(0)
destroyAllWindows()
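# Note: the pixel-by-pixel loop above runs in pure Python; since imread returns
# a numpy uint8 array, the same negative can be computed in one vectorized step
# (a sketch, not part of the original script):
#
#   negative = 255 - img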
| 295
| 1
|
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
UpperCAmelCase_ = {"""configuration_gpt_neox""": ["""GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXConfig"""]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ["""GPTNeoXTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"""GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXForCausalLM""",
"""GPTNeoXForQuestionAnswering""",
"""GPTNeoXForSequenceClassification""",
"""GPTNeoXForTokenClassification""",
"""GPTNeoXLayer""",
"""GPTNeoXModel""",
"""GPTNeoXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 295
|
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def lowerCamelCase__ ( UpperCamelCase__ : int ) -> List[str]:
'''simple docstring'''
_snake_case = VideoMAEConfig()
set_architecture_configs(UpperCamelCase__ , UpperCamelCase__ )
if "finetuned" not in model_name:
_snake_case = False
if "finetuned" in model_name:
_snake_case = 'huggingface/label-files'
if "kinetics" in model_name:
_snake_case = 400
_snake_case = 'kinetics400-id2label.json'
elif "ssv2" in model_name:
_snake_case = 174
_snake_case = 'something-something-v2-id2label.json'
else:
raise ValueError('Model name should either contain \'kinetics\' or \'ssv2\' in case it\'s fine-tuned.' )
_snake_case = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type='dataset' ) , 'r' ) )
_snake_case = {int(UpperCamelCase__ ): v for k, v in idalabel.items()}
_snake_case = idalabel
_snake_case = {v: k for k, v in idalabel.items()}
return config
def lowerCamelCase__ ( UpperCamelCase__ : str , UpperCamelCase__ : Dict ) -> int:
'''simple docstring'''
if "small" in model_name:
_snake_case = 384
_snake_case = 1_536
_snake_case = 12
_snake_case = 16
_snake_case = 12
_snake_case = 3
_snake_case = 192
_snake_case = 768
elif "large" in model_name:
_snake_case = 1_024
_snake_case = 4_096
_snake_case = 24
_snake_case = 16
_snake_case = 12
_snake_case = 8
_snake_case = 512
_snake_case = 2_048
elif "huge" in model_name:
_snake_case = 1_280
_snake_case = 5_120
_snake_case = 32
_snake_case = 16
_snake_case = 12
_snake_case = 8
_snake_case = 640
_snake_case = 2_560
elif "base" not in model_name:
raise ValueError('Model name should include either "small", "base", "large", or "huge"' )
def lowerCamelCase__ ( UpperCamelCase__ : Any ) -> Tuple:
'''simple docstring'''
if "encoder." in name:
_snake_case = name.replace('encoder.' , '' )
if "cls_token" in name:
_snake_case = name.replace('cls_token' , 'videomae.embeddings.cls_token' )
if "decoder_pos_embed" in name:
_snake_case = name.replace('decoder_pos_embed' , 'decoder.decoder_pos_embed' )
if "pos_embed" in name and "decoder" not in name:
_snake_case = name.replace('pos_embed' , 'videomae.embeddings.position_embeddings' )
if "patch_embed.proj" in name:
_snake_case = name.replace('patch_embed.proj' , 'videomae.embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
_snake_case = name.replace('patch_embed.norm' , 'videomae.embeddings.norm' )
if "decoder.blocks" in name:
_snake_case = name.replace('decoder.blocks' , 'decoder.decoder_layers' )
if "blocks" in name:
_snake_case = name.replace('blocks' , 'videomae.encoder.layer' )
if "attn.proj" in name:
_snake_case = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name and "bias" not in name:
_snake_case = name.replace('attn' , 'attention.self' )
if "attn" in name:
_snake_case = name.replace('attn' , 'attention.attention' )
if "norm1" in name:
_snake_case = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
_snake_case = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
_snake_case = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
_snake_case = name.replace('mlp.fc2' , 'output.dense' )
if "decoder_embed" in name:
_snake_case = name.replace('decoder_embed' , 'decoder.decoder_embed' )
if "decoder_norm" in name:
_snake_case = name.replace('decoder_norm' , 'decoder.decoder_norm' )
if "decoder_pred" in name:
_snake_case = name.replace('decoder_pred' , 'decoder.decoder_pred' )
if "norm.weight" in name and "decoder" not in name and "fc" not in name:
_snake_case = name.replace('norm.weight' , 'videomae.layernorm.weight' )
if "norm.bias" in name and "decoder" not in name and "fc" not in name:
_snake_case = name.replace('norm.bias' , 'videomae.layernorm.bias' )
if "head" in name and "decoder" not in name:
_snake_case = name.replace('head' , 'classifier' )
return name
def lowerCamelCase__ ( UpperCamelCase__ : Any , UpperCamelCase__ : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
_snake_case = orig_state_dict.pop(UpperCamelCase__ )
if key.startswith('encoder.' ):
_snake_case = key.replace('encoder.' , '' )
if "qkv" in key:
_snake_case = key.split('.' )
if key.startswith('decoder.blocks' ):
_snake_case = config.decoder_hidden_size
_snake_case = int(key_split[2] )
_snake_case = 'decoder.decoder_layers.'
if "weight" in key:
_snake_case = val[:dim, :]
_snake_case = val[dim : dim * 2, :]
_snake_case = val[-dim:, :]
else:
_snake_case = config.hidden_size
_snake_case = int(key_split[1] )
_snake_case = 'videomae.encoder.layer.'
if "weight" in key:
_snake_case = val[:dim, :]
_snake_case = val[dim : dim * 2, :]
_snake_case = val[-dim:, :]
else:
_snake_case = val
return orig_state_dict
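# A concrete example of the qkv split above (shapes are illustrative): for a
# base-size model with hidden_size = 768, a checkpoint key such as
# "blocks.0.attn.qkv.weight" holds a (3 * 768, 768) matrix; rows [0, 768)
# become the query weight, [768, 1536) the key weight and [1536, 2304) the
# value weight of encoder layer 0.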
def lowerCamelCase__ ( ) -> Union[str, Any]:
'''simple docstring'''
_snake_case = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' )
_snake_case = np.load(UpperCamelCase__ )
return list(UpperCamelCase__ )
def lowerCamelCase__ ( UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] ) -> List[Any]:
'''simple docstring'''
_snake_case = get_videomae_config(UpperCamelCase__ )
if "finetuned" in model_name:
_snake_case = VideoMAEForVideoClassification(UpperCamelCase__ )
else:
_snake_case = VideoMAEForPreTraining(UpperCamelCase__ )
# download original checkpoint, hosted on Google Drive
_snake_case = 'pytorch_model.bin'
gdown.cached_download(UpperCamelCase__ , UpperCamelCase__ , quiet=UpperCamelCase__ )
_snake_case = torch.load(UpperCamelCase__ , map_location='cpu' )
if "model" in files:
_snake_case = files['model']
else:
_snake_case = files['module']
_snake_case = convert_state_dict(UpperCamelCase__ , UpperCamelCase__ )
model.load_state_dict(UpperCamelCase__ )
model.eval()
# verify model on basic input
_snake_case = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
_snake_case = prepare_video()
_snake_case = image_processor(UpperCamelCase__ , return_tensors='pt' )
if "finetuned" not in model_name:
_snake_case = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos' , filename='bool_masked_pos.pt' )
_snake_case = torch.load(UpperCamelCase__ )
_snake_case = model(**UpperCamelCase__ )
_snake_case = outputs.logits
_snake_case = [
'videomae-small-finetuned-kinetics',
'videomae-small-finetuned-ssv2',
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
'videomae-base-short',
'videomae-base-short-finetuned-kinetics',
'videomae-base',
'videomae-base-finetuned-kinetics',
'videomae-large',
'videomae-large-finetuned-kinetics',
'videomae-huge-finetuned-kinetics',
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
'videomae-base-short-ssv2',
'videomae-base-short-finetuned-ssv2',
'videomae-base-ssv2',
'videomae-base-finetuned-ssv2',
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
_snake_case = torch.Size([1, 400] )
_snake_case = torch.tensor([-0.9291, -0.4061, -0.9307] )
elif model_name == "videomae-small-finetuned-ssv2":
_snake_case = torch.Size([1, 174] )
_snake_case = torch.tensor([0.2671, -0.4689, -0.8235] )
elif model_name == "videomae-base":
_snake_case = torch.Size([1, 1_408, 1_536] )
_snake_case = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]] )
elif model_name == "videomae-base-short":
_snake_case = torch.Size([1, 1_408, 1_536] )
_snake_case = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] )
# we verified the loss both for normalized and unnormalized targets for this one
_snake_case = torch.tensor([0.5142] ) if config.norm_pix_loss else torch.tensor([0.6469] )
elif model_name == "videomae-large":
_snake_case = torch.Size([1, 1_408, 1_536] )
_snake_case = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]] )
elif model_name == "videomae-large-finetuned-kinetics":
_snake_case = torch.Size([1, 400] )
_snake_case = torch.tensor([0.0771, 0.0011, -0.3625] )
elif model_name == "videomae-huge-finetuned-kinetics":
_snake_case = torch.Size([1, 400] )
_snake_case = torch.tensor([0.2433, 0.1632, -0.4894] )
elif model_name == "videomae-base-short-finetuned-kinetics":
_snake_case = torch.Size([1, 400] )
_snake_case = torch.tensor([0.6588, 0.0990, -0.2493] )
elif model_name == "videomae-base-finetuned-kinetics":
_snake_case = torch.Size([1, 400] )
_snake_case = torch.tensor([0.3669, -0.0688, -0.2421] )
elif model_name == "videomae-base-short-ssv2":
_snake_case = torch.Size([1, 1_408, 1_536] )
_snake_case = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
_snake_case = torch.Size([1, 174] )
_snake_case = torch.tensor([-0.0537, -0.1539, -0.3266] )
elif model_name == "videomae-base-ssv2":
_snake_case = torch.Size([1, 1_408, 1_536] )
_snake_case = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]] )
elif model_name == "videomae-base-finetuned-ssv2":
_snake_case = torch.Size([1, 174] )
_snake_case = torch.tensor([0.1961, -0.8337, -0.6389] )
else:
raise ValueError(F'''Model name not supported. Should be one of {model_names}''' )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
assert torch.allclose(logits[0, :3] , UpperCamelCase__ , atol=1e-4 )
else:
print('Logits:' , logits[0, :3, :3] )
assert torch.allclose(logits[0, :3, :3] , UpperCamelCase__ , atol=1e-4 )
print('Logits ok!' )
# verify loss, if applicable
if model_name == "videomae-base-short":
_snake_case = outputs.loss
assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-4 )
print('Loss ok!' )
if pytorch_dump_folder_path is not None:
print(F'''Saving model and image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(UpperCamelCase__ )
model.save_pretrained(UpperCamelCase__ )
if push_to_hub:
print('Pushing to the hub...' )
model.push_to_hub(UpperCamelCase__ , organization='nielsr' )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4""",
type=str,
help=(
"""URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"""
""" download link."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/Users/nielsrogge/Documents/VideoMAE/Test""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--model_name""", default="""videomae-base""", type=str, help="""Name of the model.""")
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
UpperCAmelCase_ = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 295
| 1
|
import functools
def lowerCamelCase__ ( UpperCamelCase__ : str , UpperCamelCase__ : str ) -> int:
'''simple docstring'''
_snake_case = len(UpperCamelCase__ )
_snake_case = len(UpperCamelCase__ )
@functools.cache
    def min_distance(indexa: int , indexb: int ) -> int:
        # if the first word's index overflows - delete all remaining letters of the second word
        if indexa >= len_worda:
            return len_wordb - indexb
        # if the second word's index overflows - delete all remaining letters of the first word
        if indexb >= len_wordb:
            return len_worda - indexa
        _snake_case = int(worda[indexa] != wordb[indexb] ) # current letters not identical
        return min(
            1 + min_distance(indexa + 1 , indexb ) , 1 + min_distance(indexa , indexb + 1 ) , diff + min_distance(indexa + 1 , indexb + 1 ) , )
return min_distance(0 , 0 )
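# A quick worked example: transforming "kitten" into "sitting" needs 3 edits
# (substitute k->s, substitute e->i, append g), so the function returns 3.
# The @functools.cache decorator memoizes the (indexa, indexb) subproblems, so
# the recursion costs O(len(worda) * len(wordb)) instead of exponential time.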
if __name__ == "__main__":
import doctest
doctest.testmod()
| 295
|
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
UpperCAmelCase_ = logging.get_logger(__name__)
# General docstring
UpperCAmelCase_ = """ResNetConfig"""
# Base docstring
UpperCAmelCase_ = """microsoft/resnet-50"""
UpperCAmelCase_ = [1, 2048, 7, 7]
# Image classification docstring
UpperCAmelCase_ = """microsoft/resnet-50"""
UpperCAmelCase_ = """tiger cat"""
UpperCAmelCase_ = [
"""microsoft/resnet-50""",
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 3 , lowerCAmelCase_ = 1 , lowerCAmelCase_ = "relu" ) -> Union[str, Any]:
super().__init__()
_snake_case = nn.Convad(
lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=lowerCAmelCase_ , stride=lowerCAmelCase_ , padding=kernel_size // 2 , bias=lowerCAmelCase_ )
_snake_case = nn.BatchNormad(lowerCAmelCase_ )
_snake_case = ACTaFN[activation] if activation is not None else nn.Identity()
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Tensor:
_snake_case = self.convolution(lowerCAmelCase_ )
_snake_case = self.normalization(lowerCAmelCase_ )
_snake_case = self.activation(lowerCAmelCase_ )
return hidden_state
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ ) -> Dict:
super().__init__()
_snake_case = ResNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
_snake_case = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 )
_snake_case = config.num_channels
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Tensor:
_snake_case = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
                'Make sure that the channel dimension of the pixel values matches the one set in the configuration.' )
_snake_case = self.embedder(lowerCAmelCase_ )
_snake_case = self.pooler(lowerCAmelCase_ )
return embedding
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 2 ) -> List[Any]:
super().__init__()
_snake_case = nn.Convad(lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=1 , stride=lowerCAmelCase_ , bias=lowerCAmelCase_ )
_snake_case = nn.BatchNormad(lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Tensor:
_snake_case = self.convolution(lowerCAmelCase_ )
_snake_case = self.normalization(lowerCAmelCase_ )
return hidden_state
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 1 , lowerCAmelCase_ = "relu" ) -> Any:
super().__init__()
_snake_case = in_channels != out_channels or stride != 1
_snake_case = (
ResNetShortCut(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ ) if should_apply_shortcut else nn.Identity()
)
_snake_case = nn.Sequential(
ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ ) , ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , activation=lowerCAmelCase_ ) , )
_snake_case = ACTaFN[activation]
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> str:
_snake_case = hidden_state
_snake_case = self.layer(lowerCAmelCase_ )
_snake_case = self.shortcut(lowerCAmelCase_ )
hidden_state += residual
_snake_case = self.activation(lowerCAmelCase_ )
return hidden_state
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 1 , lowerCAmelCase_ = "relu" , lowerCAmelCase_ = 4 ) -> List[str]:
super().__init__()
_snake_case = in_channels != out_channels or stride != 1
_snake_case = out_channels // reduction
_snake_case = (
ResNetShortCut(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ ) if should_apply_shortcut else nn.Identity()
)
_snake_case = nn.Sequential(
ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=1 ) , ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ ) , ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=1 , activation=lowerCAmelCase_ ) , )
_snake_case = ACTaFN[activation]
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Optional[Any]:
_snake_case = hidden_state
_snake_case = self.layer(lowerCAmelCase_ )
_snake_case = self.shortcut(lowerCAmelCase_ )
hidden_state += residual
_snake_case = self.activation(lowerCAmelCase_ )
return hidden_state
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 2 , ) -> Tuple:
super().__init__()
_snake_case = ResNetBottleNeckLayer if config.layer_type == 'bottleneck' else ResNetBasicLayer
_snake_case = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ , activation=config.hidden_act ) , *[layer(lowerCAmelCase_ , lowerCAmelCase_ , activation=config.hidden_act ) for _ in range(depth - 1 )] , )
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Tensor:
_snake_case = input
for layer in self.layers:
_snake_case = layer(lowerCAmelCase_ )
return hidden_state
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ ) -> int:
super().__init__()
_snake_case = nn.ModuleList([] )
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
lowerCAmelCase_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
_snake_case = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(lowerCAmelCase_ , config.depths[1:] ):
self.stages.append(ResNetStage(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , depth=lowerCAmelCase_ ) )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = False , lowerCAmelCase_ = True ) -> BaseModelOutputWithNoAttention:
_snake_case = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_snake_case = hidden_states + (hidden_state,)
_snake_case = stage_module(lowerCAmelCase_ )
if output_hidden_states:
_snake_case = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(
last_hidden_state=lowerCAmelCase_ , hidden_states=lowerCAmelCase_ , )
class UpperCamelCase_ ( _lowerCamelCase ):
lowerCAmelCase_ = ResNetConfig
lowerCAmelCase_ = '''resnet'''
lowerCAmelCase_ = '''pixel_values'''
lowerCAmelCase_ = True
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Dict:
if isinstance(lowerCAmelCase_ , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' )
elif isinstance(lowerCAmelCase_ , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=False ) -> List[Any]:
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case = value
UpperCAmelCase_ = R"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
UpperCAmelCase_ = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
'''The bare ResNet model outputting raw features without any specific head on top.''' , _lowerCamelCase , )
class UpperCamelCase_ ( _lowerCamelCase ):
def __init__( self , lowerCAmelCase_ ) -> int:
super().__init__(lowerCAmelCase_ )
_snake_case = config
_snake_case = ResNetEmbeddings(lowerCAmelCase_ )
_snake_case = ResNetEncoder(lowerCAmelCase_ )
_snake_case = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCAmelCase_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCAmelCase_ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None ) -> BaseModelOutputWithPoolingAndNoAttention:
_snake_case = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_snake_case = return_dict if return_dict is not None else self.config.use_return_dict
_snake_case = self.embedder(lowerCAmelCase_ )
_snake_case = self.encoder(
lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , return_dict=lowerCAmelCase_ )
_snake_case = encoder_outputs[0]
_snake_case = self.pooler(lowerCAmelCase_ )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowerCAmelCase_ , pooler_output=lowerCAmelCase_ , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
'''
ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''' , _lowerCamelCase , )
class UpperCamelCase_ ( _lowerCamelCase ):
def __init__( self , lowerCAmelCase_ ) -> Union[str, Any]:
super().__init__(lowerCAmelCase_ )
_snake_case = config.num_labels
_snake_case = ResNetModel(lowerCAmelCase_ )
# classification head
_snake_case = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCAmelCase_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCAmelCase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def lowerCAmelCase ( self , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , ) -> ImageClassifierOutputWithNoAttention:
_snake_case = return_dict if return_dict is not None else self.config.use_return_dict
_snake_case = self.resnet(lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , return_dict=lowerCAmelCase_ )
_snake_case = outputs.pooler_output if return_dict else outputs[1]
_snake_case = self.classifier(lowerCAmelCase_ )
_snake_case = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_snake_case = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_snake_case = 'single_label_classification'
else:
_snake_case = 'multi_label_classification'
if self.config.problem_type == "regression":
_snake_case = MSELoss()
if self.num_labels == 1:
_snake_case = loss_fct(logits.squeeze() , labels.squeeze() )
else:
_snake_case = loss_fct(lowerCAmelCase_ , lowerCAmelCase_ )
elif self.config.problem_type == "single_label_classification":
_snake_case = CrossEntropyLoss()
_snake_case = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_snake_case = BCEWithLogitsLoss()
_snake_case = loss_fct(lowerCAmelCase_ , lowerCAmelCase_ )
if not return_dict:
_snake_case = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=lowerCAmelCase_ , logits=lowerCAmelCase_ , hidden_states=outputs.hidden_states )
@add_start_docstrings(
'''
ResNet backbone, to be used with frameworks like DETR and MaskFormer.
''' , _lowerCamelCase , )
class UpperCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
def __init__( self , lowerCAmelCase_ ) -> List[Any]:
super().__init__(lowerCAmelCase_ )
super()._init_backbone(lowerCAmelCase_ )
_snake_case = [config.embedding_size] + config.hidden_sizes
_snake_case = ResNetEmbeddings(lowerCAmelCase_ )
_snake_case = ResNetEncoder(lowerCAmelCase_ )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCAmelCase_ )
@replace_return_docstrings(output_type=lowerCAmelCase_ , config_class=_CONFIG_FOR_DOC )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None ) -> BackboneOutput:
_snake_case = return_dict if return_dict is not None else self.config.use_return_dict
_snake_case = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_snake_case = self.embedder(lowerCAmelCase_ )
_snake_case = self.encoder(lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , return_dict=lowerCAmelCase_ )
_snake_case = outputs.hidden_states
_snake_case = ()
for idx, stage in enumerate(self.stage_names ):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
_snake_case = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=lowerCAmelCase_ , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=lowerCAmelCase_ , )
| 295
| 1
|
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
UpperCAmelCase_ = """src/diffusers"""
UpperCAmelCase_ = """."""
# This is to make sure the diffusers module imported is the one in the repo.
UpperCAmelCase_ = importlib.util.spec_from_file_location(
"""diffusers""",
os.path.join(DIFFUSERS_PATH, """__init__.py"""),
submodule_search_locations=[DIFFUSERS_PATH],
)
UpperCAmelCase_ = spec.loader.load_module()
def lowerCamelCase__ ( UpperCamelCase__ : int , UpperCamelCase__ : Dict ) -> Union[str, Any]:
'''simple docstring'''
return line.startswith(UpperCamelCase__ ) or len(UpperCamelCase__ ) <= 1 or re.search(R'^\s*\)(\s*->.*:|:)\s*$' , UpperCamelCase__ ) is not None
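# The regex above matches the dangling closing line of a multi-line signature,
# e.g. ") -> torch.Tensor:" or "):". For example, with indent = "    ":
#   _should_continue(") -> int:", "    ")     # True (regex branch)
#   _should_continue("def other():", "    ")  # False (new top-level object)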
def lowerCamelCase__ ( UpperCamelCase__ : Dict ) -> Tuple:
'''simple docstring'''
_snake_case = object_name.split('.' )
_snake_case = 0
# First let's find the module where our object lives.
_snake_case = parts[i]
while i < len(UpperCamelCase__ ) and not os.path.isfile(os.path.join(UpperCamelCase__ , F'''{module}.py''' ) ):
i += 1
if i < len(UpperCamelCase__ ):
_snake_case = os.path.join(UpperCamelCase__ , parts[i] )
if i >= len(UpperCamelCase__ ):
raise ValueError(F'''`object_name` should begin with the name of a module of diffusers but got {object_name}.''' )
with open(os.path.join(UpperCamelCase__ , F'''{module}.py''' ) , 'r' , encoding='utf-8' , newline='\n' ) as f:
_snake_case = f.readlines()
# Now let's find the class / func in the code!
_snake_case = ''
_snake_case = 0
for name in parts[i + 1 :]:
while (
line_index < len(UpperCamelCase__ ) and re.search(RF'''^{indent}(class|def)\s+{name}(\(|\:)''' , lines[line_index] ) is None
):
line_index += 1
        indent += "    "
line_index += 1
if line_index >= len(UpperCamelCase__ ):
raise ValueError(F''' {object_name} does not match any function or class in {module}.''' )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
_snake_case = line_index
while line_index < len(UpperCamelCase__ ) and _should_continue(lines[line_index] , UpperCamelCase__ ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
_snake_case = lines[start_index:line_index]
return "".join(UpperCamelCase__ )
UpperCAmelCase_ = re.compile(R"""^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)""")
UpperCAmelCase_ = re.compile(R"""^\s*(\S+)->(\S+)(\s+.*|$)""")
UpperCAmelCase_ = re.compile(R"""<FILL\s+[^>]*>""")
def lowerCamelCase__ ( UpperCamelCase__ : List[str] ) -> int:
'''simple docstring'''
_snake_case = code.split('\n' )
_snake_case = 0
while idx < len(UpperCamelCase__ ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(UpperCamelCase__ ):
return re.search(R'^(\s*)\S' , lines[idx] ).groups()[0]
return ""
def lowerCamelCase__ ( UpperCamelCase__ : List[Any] ) -> List[Any]:
'''simple docstring'''
_snake_case = len(get_indent(UpperCamelCase__ ) ) > 0
if has_indent:
_snake_case = F'''class Bla:\n{code}'''
_snake_case = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 , preview=UpperCamelCase__ )
_snake_case = black.format_str(UpperCamelCase__ , mode=UpperCamelCase__ )
_snake_case , _snake_case = style_docstrings_in_code(UpperCamelCase__ )
return result[len('class Bla:\n' ) :] if has_indent else result
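# Why the dummy class above: black only formats syntactically valid top-level
# code, so an indented method body is wrapped in "class Bla:" before formatting
# and the wrapper line is stripped from the result afterwards.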
def lowerCamelCase__ ( UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[int]=False ) -> str:
'''simple docstring'''
with open(UpperCamelCase__ , 'r' , encoding='utf-8' , newline='\n' ) as f:
_snake_case = f.readlines()
_snake_case = []
_snake_case = 0
    # Not a for loop because `lines` is going to change (if `overwrite=True`).
while line_index < len(UpperCamelCase__ ):
_snake_case = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
_snake_case , _snake_case , _snake_case = search.groups()
_snake_case = find_code_in_diffusers(UpperCamelCase__ )
_snake_case = get_indent(UpperCamelCase__ )
_snake_case = line_index + 1 if indent == theoretical_indent else line_index + 2
_snake_case = theoretical_indent
_snake_case = start_index
        # Loop to check the observed code; stop when the indentation diminishes or when we see an `# End copy` comment.
_snake_case = True
while line_index < len(UpperCamelCase__ ) and should_continue:
line_index += 1
if line_index >= len(UpperCamelCase__ ):
break
_snake_case = lines[line_index]
_snake_case = _should_continue(UpperCamelCase__ , UpperCamelCase__ ) and re.search(F'''^{indent}# End copy''' , UpperCamelCase__ ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
_snake_case = lines[start_index:line_index]
_snake_case = ''.join(UpperCamelCase__ )
# Remove any nested `Copied from` comments to avoid circular copies
_snake_case = [line for line in theoretical_code.split('\n' ) if _re_copy_warning.search(UpperCamelCase__ ) is None]
_snake_case = '\n'.join(UpperCamelCase__ )
# Before comparing, use the `replace_pattern` on the original code.
if len(UpperCamelCase__ ) > 0:
_snake_case = replace_pattern.replace('with' , '' ).split(',' )
_snake_case = [_re_replace_pattern.search(UpperCamelCase__ ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
_snake_case , _snake_case , _snake_case = pattern.groups()
_snake_case = re.sub(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if option.strip() == "all-casing":
                    _snake_case = re.sub(obja.lower() , objb.lower() , UpperCamelCase__ )
                    _snake_case = re.sub(obja.upper() , objb.upper() , UpperCamelCase__ )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
_snake_case = blackify(lines[start_index - 1] + theoretical_code )
_snake_case = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
_snake_case = lines[:start_index] + [theoretical_code] + lines[line_index:]
_snake_case = start_index + 1
if overwrite and len(UpperCamelCase__ ) > 0:
# Warn the user a file has been modified.
print(F'''Detected changes, rewriting {filename}.''' )
with open(UpperCamelCase__ , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(UpperCamelCase__ )
return diffs
def lowerCamelCase__ ( UpperCamelCase__ : bool = False ) -> List[str]:
'''simple docstring'''
_snake_case = glob.glob(os.path.join(UpperCamelCase__ , '**/*.py' ) , recursive=UpperCamelCase__ )
_snake_case = []
for filename in all_files:
_snake_case = is_copy_consistent(UpperCamelCase__ , UpperCamelCase__ )
diffs += [F'''- {filename}: copy does not match {d[0]} at line {d[1]}''' for d in new_diffs]
if not overwrite and len(UpperCamelCase__ ) > 0:
_snake_case = '\n'.join(UpperCamelCase__ )
raise Exception(
'Found the following copy inconsistencies:\n'
+ diff
+ '\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.' )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
UpperCAmelCase_ = parser.parse_args()
check_copies(args.fix_and_overwrite)
| 295
|
def lowerCamelCase__ ( UpperCamelCase__ : list[list[int]] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : list[int] ) -> bool:
'''simple docstring'''
    # 1. Validate that an edge exists between the current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
return False
# 2. Validate that next vertex is not already in path
return not any(vertex == next_ver for vertex in path )
def lowerCamelCase__ ( UpperCamelCase__ : list[list[int]] , UpperCamelCase__ : list[int] , UpperCamelCase__ : int ) -> bool:
'''simple docstring'''
if curr_ind == len(UpperCamelCase__ ):
        # return whether an edge exists between the last and the starting vertices
return graph[path[curr_ind - 1]][path[0]] == 1
# Recursive Step
for next_ver in range(0 , len(UpperCamelCase__ ) ):
if valid_connection(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
# Insert current vertex into path as next transition
_snake_case = next_ver
# Validate created path
if util_hamilton_cycle(UpperCamelCase__ , UpperCamelCase__ , curr_ind + 1 ):
return True
# Backtrack
_snake_case = -1
return False
def lowerCamelCase__ ( UpperCamelCase__ : list[list[int]] , UpperCamelCase__ : int = 0 ) -> list[int]:
'''simple docstring'''
_snake_case = [-1] * (len(UpperCamelCase__ ) + 1)
# initialize start and end of path with starting index
_snake_case = _snake_case = start_index
    # evaluate the path: return it if a Hamiltonian cycle exists, otherwise return an empty list
return path if util_hamilton_cycle(UpperCamelCase__ , UpperCamelCase__ , 1 ) else []
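# A worked example (the adjacency matrix is illustrative; `hamilton_cycle` is
# the original name of the last function above): a 5-vertex graph with edges
# 0-1, 0-3, 1-2, 1-3, 1-4, 2-4, 3-4,
#
#   graph = [
#       [0, 1, 0, 1, 0],
#       [1, 0, 1, 1, 1],
#       [0, 1, 0, 0, 1],
#       [1, 1, 0, 0, 1],
#       [0, 1, 1, 1, 0],
#   ]
#
# yields hamilton_cycle(graph) == [0, 1, 2, 4, 3, 0]: every vertex is visited
# once before returning to the start.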
| 295
| 1
|
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
UpperCAmelCase_ = {
"""vocab_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"""
),
},
"""tokenizer_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/tokenizer.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""",
"""roberta-base-openai-detector""": (
"""https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"""
),
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"""
),
},
}
UpperCAmelCase_ = {
"""roberta-base""": 512,
"""roberta-large""": 512,
"""roberta-large-mnli""": 512,
"""distilroberta-base""": 512,
"""roberta-base-openai-detector""": 512,
"""roberta-large-openai-detector""": 512,
}
class UpperCamelCase_ ( _lowerCamelCase ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ['''input_ids''', '''attention_mask''']
lowerCAmelCase_ = RobertaTokenizer
def __init__( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_="replace" , lowerCAmelCase_="<s>" , lowerCAmelCase_="</s>" , lowerCAmelCase_="</s>" , lowerCAmelCase_="<s>" , lowerCAmelCase_="<unk>" , lowerCAmelCase_="<pad>" , lowerCAmelCase_="<mask>" , lowerCAmelCase_=False , lowerCAmelCase_=True , **lowerCAmelCase_ , ) -> int:
super().__init__(
lowerCAmelCase_ , lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , errors=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ , trim_offsets=lowerCAmelCase_ , **lowerCAmelCase_ , )
_snake_case = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , lowerCAmelCase_ ) != add_prefix_space:
_snake_case = getattr(lowerCAmelCase_ , pre_tok_state.pop('type' ) )
_snake_case = add_prefix_space
_snake_case = pre_tok_class(**lowerCAmelCase_ )
_snake_case = add_prefix_space
_snake_case = 'post_processor'
_snake_case = getattr(self.backend_tokenizer , lowerCAmelCase_ , lowerCAmelCase_ )
if tokenizer_component_instance:
_snake_case = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_snake_case = tuple(state['sep'] )
if "cls" in state:
_snake_case = tuple(state['cls'] )
_snake_case = False
if state.get('add_prefix_space' , lowerCAmelCase_ ) != add_prefix_space:
_snake_case = add_prefix_space
_snake_case = True
if state.get('trim_offsets' , lowerCAmelCase_ ) != trim_offsets:
_snake_case = trim_offsets
_snake_case = True
if changes_to_apply:
_snake_case = getattr(lowerCAmelCase_ , state.pop('type' ) )
_snake_case = component_class(**lowerCAmelCase_ )
setattr(self.backend_tokenizer , lowerCAmelCase_ , lowerCAmelCase_ )
@property
def lowerCAmelCase ( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Dict:
_snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else value
_snake_case = value
def lowerCAmelCase ( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> BatchEncoding:
_snake_case = kwargs.get('is_split_into_words' , lowerCAmelCase_ )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*lowerCAmelCase_ , **lowerCAmelCase_ )
def lowerCAmelCase ( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> BatchEncoding:
_snake_case = kwargs.get('is_split_into_words' , lowerCAmelCase_ )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*lowerCAmelCase_ , **lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> Tuple[str]:
_snake_case = self._tokenizer.model.save(lowerCAmelCase_ , name=lowerCAmelCase_ )
return tuple(lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=None ) -> Optional[Any]:
_snake_case = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> List[int]:
_snake_case = [self.sep_token_id]
_snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
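# Hedged usage sketch: the class above mirrors `RobertaTokenizerFast` from
# `transformers`. With the real library installed (and network access for the
# first download), the equivalent public API is:
#
#   from transformers import RobertaTokenizerFast
#   tok = RobertaTokenizerFast.from_pretrained("roberta-base")
#   enc = tok("Hello world")
#   print(enc["input_ids"])  # wrapped in <s> (id 0) ... </s> (id 2)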
| 295
|
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def lowerCamelCase__ ( UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict ) -> List[Any]:
'''simple docstring'''
_snake_case = OmegaConf.load(UpperCamelCase__ )
_snake_case = torch.load(UpperCamelCase__ , map_location='cpu' )['model']
_snake_case = list(state_dict.keys() )
# extract state_dict for VQVAE
_snake_case = {}
_snake_case = 'first_stage_model.'
for key in keys:
if key.startswith(UpperCamelCase__ ):
_snake_case = state_dict[key]
# extract state_dict for UNetLDM
_snake_case = {}
_snake_case = 'model.diffusion_model.'
for key in keys:
if key.startswith(UpperCamelCase__ ):
_snake_case = state_dict[key]
_snake_case = config.model.params.first_stage_config.params
_snake_case = config.model.params.unet_config.params
_snake_case = VQModel(**UpperCamelCase__ ).eval()
vqvae.load_state_dict(UpperCamelCase__ )
_snake_case = UNetLDMModel(**UpperCamelCase__ ).eval()
unet.load_state_dict(UpperCamelCase__ )
_snake_case = DDIMScheduler(
        num_train_timesteps=config.model.params.timesteps , beta_schedule='scaled_linear' , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=UpperCamelCase__ , )
_snake_case = LDMPipeline(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
pipeline.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", type=str, required=True)
parser.add_argument("""--config_path""", type=str, required=True)
parser.add_argument("""--output_path""", type=str, required=True)
UpperCAmelCase_ = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
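# Hedged CLI sketch for the conversion entry point above; the flags come from
# the argparse setup, while the script filename is an assumption:
#
#   python convert_ldm_original.py \
#       --checkpoint_path model.ckpt \
#       --config_path config.yaml \
#       --output_path ./ldm_pipeline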
| 295
| 1
|
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
UpperCAmelCase_ = """python tqdm regex requests packaging filelock numpy tokenizers""".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("""dataclasses""")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("""importlib_metadata""")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def lowerCamelCase__ ( UpperCamelCase__ : str , UpperCamelCase__ : Tuple=None ) -> Dict:
'''simple docstring'''
require_version(deps[pkg] , UpperCamelCase__ )
| 295
|
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UpperCamelCase_ :
@property
def lowerCAmelCase ( self ) -> int:
return self.get_dummy_input()
@property
def lowerCAmelCase ( self ) -> Optional[Any]:
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(F'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' )
def lowerCAmelCase ( self , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=False , ) -> List[str]:
_snake_case = 4
_snake_case = 32
_snake_case = (32, 32)
_snake_case = torch.manual_seed(0 )
_snake_case = torch.device(lowerCAmelCase_ )
_snake_case = (batch_size, num_channels) + sizes
_snake_case = randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=lowerCAmelCase_ )
_snake_case = {'hidden_states': hidden_states}
if include_temb:
_snake_case = 128
_snake_case = randn_tensor((batch_size, temb_channels) , generator=lowerCAmelCase_ , device=lowerCAmelCase_ )
if include_res_hidden_states_tuple:
_snake_case = torch.manual_seed(1 )
_snake_case = (randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=lowerCAmelCase_ ),)
if include_encoder_hidden_states:
_snake_case = floats_tensor((batch_size, 32, 32) ).to(lowerCAmelCase_ )
if include_skip_sample:
_snake_case = randn_tensor(((batch_size, 3) + sizes) , generator=lowerCAmelCase_ , device=lowerCAmelCase_ )
return dummy_input
def lowerCAmelCase ( self ) -> Tuple:
_snake_case = {
'in_channels': 32,
'out_channels': 32,
'temb_channels': 128,
}
if self.block_type == "up":
_snake_case = 32
if self.block_type == "mid":
init_dict.pop('out_channels' )
_snake_case = self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Optional[int]:
_snake_case , _snake_case = self.prepare_init_args_and_inputs_for_common()
_snake_case = self.block_class(**lowerCAmelCase_ )
unet_block.to(lowerCAmelCase_ )
unet_block.eval()
with torch.no_grad():
_snake_case = unet_block(**lowerCAmelCase_ )
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case = output[0]
self.assertEqual(output.shape , self.output_shape )
_snake_case = output[0, -1, -3:, -3:]
_snake_case = torch.tensor(lowerCAmelCase_ ).to(lowerCAmelCase_ )
assert torch_all_close(output_slice.flatten() , lowerCAmelCase_ , atol=5E-3 )
@unittest.skipIf(torch_device == 'mps' , 'Training is not supported in mps' )
def lowerCAmelCase ( self ) -> Tuple:
_snake_case , _snake_case = self.prepare_init_args_and_inputs_for_common()
_snake_case = self.block_class(**lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.train()
_snake_case = model(**lowerCAmelCase_ )
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case = output[0]
_snake_case = torch.device(lowerCAmelCase_ )
_snake_case = randn_tensor(output.shape , device=lowerCAmelCase_ )
_snake_case = torch.nn.functional.mse_loss(lowerCAmelCase_ , lowerCAmelCase_ )
loss.backward()
| 295
| 1
|
from __future__ import annotations
def lowerCamelCase__ ( UpperCamelCase__ : list[int] , UpperCamelCase__ : int ) -> bool:
'''simple docstring'''
if len(UpperCamelCase__ ) == 0:
return False
_snake_case = len(UpperCamelCase__ ) // 2
if a_list[midpoint] == item:
return True
if item < a_list[midpoint]:
return binary_search(a_list[:midpoint] , UpperCamelCase__ )
else:
return binary_search(a_list[midpoint + 1 :] , UpperCamelCase__ )
if __name__ == "__main__":
UpperCAmelCase_ = input("""Enter numbers separated by comma:\n""").strip()
UpperCAmelCase_ = [int(item.strip()) for item in user_input.split(""",""")]
UpperCAmelCase_ = int(input("""Enter the number to be found in the list:\n""").strip())
UpperCAmelCase_ = """""" if binary_search(sequence, target) else """not """
print(F"{target} was {not_str}found in {sequence}")
| 295
|
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def lowerCamelCase__ ( ) -> List[str]:
'''simple docstring'''
_snake_case , _snake_case = 9, 14 # noqa: F841
_snake_case = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
_snake_case = defaultdict(UpperCamelCase__ )
for nodea, nodea, cost in edges:
adjancency[nodea].append([nodea, cost] )
adjancency[nodea].append([nodea, cost] )
_snake_case = mst(UpperCamelCase__ )
_snake_case = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
_snake_case = tuple(answer[:2] )
_snake_case = tuple(edge[::-1] )
assert edge in result or reverse in result
| 295
| 1
|
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
UpperCAmelCase_ = {
"""debug""": logging.DEBUG,
"""info""": logging.INFO,
"""warning""": logging.WARNING,
"""error""": logging.ERROR,
"""critical""": logging.CRITICAL,
}
UpperCAmelCase_ = logging.WARNING
def lowerCamelCase__ ( ) -> Optional[int]:
'''simple docstring'''
_snake_case = os.getenv('DATASETS_VERBOSITY' , UpperCamelCase__ )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
F'''Unknown option DATASETS_VERBOSITY={env_level_str}, '''
F'''has to be one of: { ", ".join(log_levels.keys() ) }''' )
return _default_log_level
def lowerCamelCase__ ( ) -> str:
'''simple docstring'''
return __name__.split('.' )[0]
def lowerCamelCase__ ( ) -> logging.Logger:
'''simple docstring'''
return logging.getLogger(_get_library_name() )
def lowerCamelCase__ ( ) -> None:
'''simple docstring'''
_snake_case = _get_library_root_logger()
library_root_logger.setLevel(_get_default_logging_level() )
def lowerCamelCase__ ( ) -> None:
'''simple docstring'''
_snake_case = _get_library_root_logger()
library_root_logger.setLevel(logging.NOTSET )
def lowerCamelCase__ ( UpperCamelCase__ : Optional[str] = None ) -> logging.Logger:
'''simple docstring'''
if name is None:
_snake_case = _get_library_name()
return logging.getLogger(UpperCamelCase__ )
def lowerCamelCase__ ( ) -> int:
'''simple docstring'''
return _get_library_root_logger().getEffectiveLevel()
def lowerCamelCase__ ( UpperCamelCase__ : int ) -> None:
'''simple docstring'''
_get_library_root_logger().setLevel(UpperCamelCase__ )
def lowerCamelCase__ ( ) -> int:
'''simple docstring'''
return set_verbosity(UpperCamelCase__ )
def lowerCamelCase__ ( ) -> Union[str, Any]:
'''simple docstring'''
return set_verbosity(UpperCamelCase__ )
def lowerCamelCase__ ( ) -> Union[str, Any]:
'''simple docstring'''
return set_verbosity(UpperCamelCase__ )
def lowerCamelCase__ ( ) -> int:
'''simple docstring'''
return set_verbosity(UpperCamelCase__ )
def lowerCamelCase__ ( ) -> None:
'''simple docstring'''
_snake_case = False
def lowerCamelCase__ ( ) -> None:
'''simple docstring'''
_snake_case = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class UpperCamelCase_ :
def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> Optional[Any]: # pylint: disable=unused-argument
_snake_case = args[0] if args else None
def __iter__( self ) -> str:
return iter(self._iterator )
def __getattr__( self , lowerCAmelCase_ ) -> List[str]:
def empty_fn(*lowerCAmelCase_ , **lowerCAmelCase_ ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self ) -> str:
return self
def __exit__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]:
return
UpperCAmelCase_ = True
class UpperCamelCase_ :
def __call__( self , *lowerCAmelCase_ , lowerCAmelCase_=False , **lowerCAmelCase_ ) -> Union[str, Any]:
if _tqdm_active and not disable:
return tqdm_lib.tqdm(*lowerCAmelCase_ , **lowerCAmelCase_ )
else:
return EmptyTqdm(*lowerCAmelCase_ , **lowerCAmelCase_ )
def lowerCAmelCase ( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> Optional[Any]:
_snake_case = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*lowerCAmelCase_ , **lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> str:
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
UpperCAmelCase_ = _tqdm_cls()
def lowerCamelCase__ ( ) -> bool:
'''simple docstring'''
global _tqdm_active
return bool(_tqdm_active )
def lowerCamelCase__ ( ) -> List[str]:
'''simple docstring'''
global _tqdm_active
_snake_case = True
def lowerCamelCase__ ( ) -> int:
'''simple docstring'''
global _tqdm_active
_snake_case = False
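# Hedged usage sketch: the helpers above mirror `datasets.utils.logging`.
# Typical calls through the public `datasets` namespace look like:
#
#   import datasets
#   datasets.logging.set_verbosity_info()    # surface INFO and above
#   logger = datasets.logging.get_logger(__name__)
#   logger.info("now visible; DEBUG messages stay suppressed")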
| 295
|
from collections.abc import Sequence
def lowerCamelCase__ ( UpperCamelCase__ : Sequence[float] , UpperCamelCase__ : bool = False ) -> float:
'''simple docstring'''
if not arr:
return 0
_snake_case = 0 if allow_empty_subarrays else float('-inf' )
_snake_case = 0.0
for num in arr:
_snake_case = max(0 if allow_empty_subarrays else num , curr_sum + num )
_snake_case = max(UpperCamelCase__ , UpperCamelCase__ )
return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
UpperCAmelCase_ = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(F"{max_subarray_sum(nums) = }")
| 295
| 1
|
from typing import List
import numpy as np
def lowerCamelCase__ ( UpperCamelCase__ : dict ) -> int:
'''simple docstring'''
_snake_case = {key: len(UpperCamelCase__ ) for key, value in gen_kwargs.items() if isinstance(UpperCamelCase__ , UpperCamelCase__ )}
if len(set(lists_lengths.values() ) ) > 1:
raise RuntimeError(
(
'Sharding is ambiguous for this dataset: '
+ 'we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n'
+ '\n'.join(F'''\t- key {key} has length {length}''' for key, length in lists_lengths.items() )
+ '\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, '
+ 'and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.'
) )
_snake_case = max(lists_lengths.values() , default=0 )
return max(1 , UpperCamelCase__ )
def lowerCamelCase__ ( UpperCamelCase__ : int , UpperCamelCase__ : int ) -> List[range]:
'''simple docstring'''
_snake_case = []
for group_idx in range(UpperCamelCase__ ):
_snake_case = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
if num_shards_to_add == 0:
break
_snake_case = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
_snake_case = range(UpperCamelCase__ , start + num_shards_to_add )
shards_indices_per_group.append(UpperCamelCase__ )
return shards_indices_per_group
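# Hedged worked example of the round-robin split above: 5 shards over 3 jobs
# yields group sizes [2, 2, 1], i.e. the contiguous ranges
# range(0, 2), range(2, 4), range(4, 5); in general group g gets
# num_shards // max_num_jobs plus one extra while g < num_shards % max_num_jobs.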
def lowerCamelCase__ ( UpperCamelCase__ : dict , UpperCamelCase__ : int ) -> List[dict]:
'''simple docstring'''
_snake_case = _number_of_shards_in_gen_kwargs(UpperCamelCase__ )
if num_shards == 1:
return [dict(UpperCamelCase__ )]
else:
_snake_case = _distribute_shards(num_shards=UpperCamelCase__ , max_num_jobs=UpperCamelCase__ )
return [
{
key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
if isinstance(UpperCamelCase__ , UpperCamelCase__ )
else value
for key, value in gen_kwargs.items()
}
for group_idx in range(len(UpperCamelCase__ ) )
]
def lowerCamelCase__ ( UpperCamelCase__ : List[dict] ) -> dict:
'''simple docstring'''
return {
key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
if isinstance(gen_kwargs_list[0][key] , UpperCamelCase__ )
else gen_kwargs_list[0][key]
for key in gen_kwargs_list[0]
}
def lowerCamelCase__ ( UpperCamelCase__ : np.random.Generator , UpperCamelCase__ : dict ) -> dict:
'''simple docstring'''
_snake_case = {len(UpperCamelCase__ ) for value in gen_kwargs.values() if isinstance(UpperCamelCase__ , UpperCamelCase__ )}
_snake_case = {}
for size in list_sizes:
_snake_case = list(range(UpperCamelCase__ ) )
rng.shuffle(indices_per_size[size] )
# Now let's copy the gen_kwargs and shuffle the lists based on their sizes
_snake_case = dict(UpperCamelCase__ )
for key, value in shuffled_kwargs.items():
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
_snake_case = [value[i] for i in indices_per_size[len(UpperCamelCase__ )]]
return shuffled_kwargs
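# Hedged note on the shuffling contract above: all list values of equal
# length are permuted with the same index order, so parallel data-source
# lists stay aligned, e.g. {"files": ["a", "b", "c"], "sizes": [1, 2, 3]}
# may become {"files": ["c", "a", "b"], "sizes": [3, 1, 2]}.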
| 295
|
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class UpperCamelCase_ ( enum.Enum ):
lowerCAmelCase_ = 0
lowerCAmelCase_ = 1
lowerCAmelCase_ = 2
@add_end_docstrings(_lowerCamelCase )
class UpperCamelCase_ ( _lowerCamelCase ):
lowerCAmelCase_ = '''
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
'''
def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> Any:
super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == 'tf' else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
_snake_case = None
if self.model.config.prefix is not None:
_snake_case = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
_snake_case = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
_snake_case , _snake_case , _snake_case = self._sanitize_parameters(prefix=lowerCAmelCase_ , **self._forward_params )
_snake_case = {**self._preprocess_params, **preprocess_params}
_snake_case = {**self._forward_params, **forward_params}
def lowerCAmelCase ( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ , ) -> Tuple:
_snake_case = {}
if prefix is not None:
_snake_case = prefix
if prefix:
_snake_case = self.tokenizer(
lowerCAmelCase_ , padding=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_tensors=self.framework )
_snake_case = prefix_inputs['input_ids'].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
F'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected'''
' [None, \'hole\']' )
_snake_case = handle_long_generation
preprocess_params.update(lowerCAmelCase_ )
_snake_case = generate_kwargs
_snake_case = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_full_text`' )
if return_tensors is not None:
raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`' )
_snake_case = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_tensors`' )
_snake_case = ReturnType.TENSORS
if return_type is not None:
_snake_case = return_type
if clean_up_tokenization_spaces is not None:
_snake_case = clean_up_tokenization_spaces
if stop_sequence is not None:
_snake_case = self.tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
if len(lowerCAmelCase_ ) > 1:
warnings.warn(
'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
' the stop sequence will be used as the stop sequence string in the interim.' )
_snake_case = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def lowerCAmelCase ( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> List[str]:
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({'add_space_before_punct_symbol': True} )
return super()._parse_and_tokenize(*lowerCAmelCase_ , **lowerCAmelCase_ )
def __call__( self , lowerCAmelCase_ , **lowerCAmelCase_ ) -> List[Any]:
return super().__call__(lowerCAmelCase_ , **lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_="" , lowerCAmelCase_=None , **lowerCAmelCase_ ) -> Any:
_snake_case = self.tokenizer(
prefix + prompt_text , padding=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_tensors=self.framework )
_snake_case = prompt_text
if handle_long_generation == "hole":
_snake_case = inputs['input_ids'].shape[-1]
if "max_new_tokens" in generate_kwargs:
_snake_case = generate_kwargs['max_new_tokens']
else:
_snake_case = generate_kwargs.get('max_length' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('We cannot infer how many new tokens are expected' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
_snake_case = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
'We cannot use `hole` to handle this generation the number of desired tokens exceeds the'
' models max length' )
_snake_case = inputs['input_ids'][:, -keep_length:]
if "attention_mask" in inputs:
_snake_case = inputs['attention_mask'][:, -keep_length:]
return inputs
def lowerCAmelCase ( self , lowerCAmelCase_ , **lowerCAmelCase_ ) -> Optional[Any]:
_snake_case = model_inputs['input_ids']
_snake_case = model_inputs.get('attention_mask' , lowerCAmelCase_ )
# Allow empty prompts
if input_ids.shape[1] == 0:
_snake_case = None
_snake_case = None
_snake_case = 1
else:
_snake_case = input_ids.shape[0]
_snake_case = model_inputs.pop('prompt_text' )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
_snake_case = generate_kwargs.pop('prefix_length' , 0 )
if prefix_length > 0:
_snake_case = 'max_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].max_new_tokens is not None
)
if not has_max_new_tokens:
_snake_case = generate_kwargs.get('max_length' ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
_snake_case = 'min_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
_snake_case = self.model.generate(input_ids=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , **lowerCAmelCase_ )
_snake_case = generated_sequence.shape[0]
if self.framework == "pt":
_snake_case = generated_sequence.reshape(lowerCAmelCase_ , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
_snake_case = tf.reshape(lowerCAmelCase_ , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=ReturnType.FULL_TEXT , lowerCAmelCase_=True ) -> int:
_snake_case = model_outputs['generated_sequence'][0]
_snake_case = model_outputs['input_ids']
_snake_case = model_outputs['prompt_text']
_snake_case = generated_sequence.numpy().tolist()
_snake_case = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
_snake_case = {'generated_token_ids': sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
_snake_case = self.tokenizer.decode(
lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
_snake_case = 0
else:
_snake_case = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ , ) )
if return_type == ReturnType.FULL_TEXT:
_snake_case = prompt_text + text[prompt_length:]
else:
_snake_case = text[prompt_length:]
_snake_case = {'generated_text': all_text}
records.append(lowerCAmelCase_ )
return records
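# Hedged usage sketch: the pipeline above is normally reached through the
# `pipeline` factory in `transformers` (model download required):
#
#   from transformers import pipeline
#   generator = pipeline("text-generation", model="gpt2")
#   out = generator("Once upon a time", max_new_tokens=20, return_full_text=False)
#   print(out[0]["generated_text"])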
| 295
| 1
|
import pprint
import requests
UpperCAmelCase_ = """https://zenquotes.io/api"""
def lowerCamelCase__ ( ) -> list:
'''simple docstring'''
return requests.get(API_ENDPOINT_URL + '/today' ).json()
def lowerCamelCase__ ( ) -> list:
'''simple docstring'''
return requests.get(API_ENDPOINT_URL + '/random' ).json()
if __name__ == "__main__":
UpperCAmelCase_ = random_quotes()
pprint.pprint(response)
| 295
|
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
UpperCAmelCase_ = """\
@misc{wu2016googles,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
UpperCAmelCase_ = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
UpperCAmelCase_ = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
'google_bleu': google_bleu score
Examples:
Example 1:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase_ ( datasets.Metric ):
def lowerCAmelCase ( self ) -> MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
} ) , )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 1 , lowerCAmelCase_ = 4 , ) -> Dict[str, float]:
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=lowerCAmelCase_ , hypotheses=lowerCAmelCase_ , min_len=lowerCAmelCase_ , max_len=lowerCAmelCase_ )
}
| 295
| 1
|
from bisect import bisect
from itertools import accumulate
def lowerCamelCase__ ( UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Any ) -> Optional[Any]:
'''simple docstring'''
_snake_case = sorted(zip(UpperCamelCase__ , UpperCamelCase__ ) , key=lambda UpperCamelCase__ : x[0] / x[1] , reverse=UpperCamelCase__ )
_snake_case , _snake_case = [i[0] for i in r], [i[1] for i in r]
_snake_case = list(accumulate(UpperCamelCase__ ) )
_snake_case = bisect(UpperCamelCase__ , UpperCamelCase__ )
return (
0
if k == 0
else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
if k != n
else sum(vl[:k] )
)
if __name__ == "__main__":
import doctest
doctest.testmod()
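# Hedged worked example for the greedy split above: values [60, 100, 120],
# weights [10, 20, 30], capacity 50. Sorted by value/weight, the first two
# items fit whole and 20/30 of the third is taken:
# 60 + 100 + 120 * 20 / 30 = 240.0.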
| 295
|
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
# See all BART models at https://huggingface.co/models?filter=bart
UpperCAmelCase_ = {
"""vocab_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""",
},
"""merges_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""",
},
}
UpperCAmelCase_ = {
"""facebook/bart-base""": 1024,
"""facebook/bart-large""": 1024,
"""facebook/bart-large-mnli""": 1024,
"""facebook/bart-large-cnn""": 1024,
"""facebook/bart-large-xsum""": 1024,
"""yjernite/bart_eli5""": 1024,
}
@lru_cache()
def lowerCamelCase__ ( ) -> Tuple:
'''simple docstring'''
_snake_case = (
list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
)
_snake_case = bs[:]
_snake_case = 0
for b in range(2**8 ):
if b not in bs:
bs.append(UpperCamelCase__ )
cs.append(2**8 + n )
n += 1
_snake_case = [chr(UpperCamelCase__ ) for n in cs]
return dict(zip(UpperCamelCase__ , UpperCamelCase__ ) )
def lowerCamelCase__ ( UpperCamelCase__ : Tuple ) -> int:
'''simple docstring'''
_snake_case = set()
_snake_case = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_snake_case = char
return pairs
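# Hedged sketch of the two helpers above: `bytes_to_unicode` builds a
# reversible byte -> printable-character map (bytes without a printable
# glyph are shifted past 2**8), and `get_pairs` collects adjacent symbol
# pairs, e.g. get_pairs(("h", "e", "l", "l", "o")) ->
# {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}.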
class UpperCamelCase_ ( _lowerCamelCase ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ['''input_ids''', '''attention_mask''']
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="replace" , lowerCAmelCase_="<s>" , lowerCAmelCase_="</s>" , lowerCAmelCase_="</s>" , lowerCAmelCase_="<s>" , lowerCAmelCase_="<unk>" , lowerCAmelCase_="<pad>" , lowerCAmelCase_="<mask>" , lowerCAmelCase_=False , **lowerCAmelCase_ , ) -> Tuple:
_snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else bos_token
_snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else eos_token
_snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else sep_token
_snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else cls_token
_snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else unk_token
_snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
_snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else mask_token
super().__init__(
errors=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ , **lowerCAmelCase_ , )
with open(lowerCAmelCase_ , encoding='utf-8' ) as vocab_handle:
_snake_case = json.load(lowerCAmelCase_ )
_snake_case = {v: k for k, v in self.encoder.items()}
_snake_case = errors # how to handle errors in decoding
_snake_case = bytes_to_unicode()
_snake_case = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCAmelCase_ , encoding='utf-8' ) as merges_handle:
_snake_case = merges_handle.read().split('\n' )[1:-1]
_snake_case = [tuple(merge.split() ) for merge in bpe_merges]
_snake_case = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
_snake_case = {}
_snake_case = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
_snake_case = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
def lowerCAmelCase ( self ) -> Any:
return len(self.encoder )
def lowerCAmelCase ( self ) -> Dict:
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Dict:
if token in self.cache:
return self.cache[token]
_snake_case = tuple(lowerCAmelCase_ )
_snake_case = get_pairs(lowerCAmelCase_ )
if not pairs:
return token
while True:
_snake_case = min(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : self.bpe_ranks.get(lowerCAmelCase_ , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
_snake_case , _snake_case = bigram
_snake_case = []
_snake_case = 0
while i < len(lowerCAmelCase_ ):
try:
_snake_case = word.index(lowerCAmelCase_ , lowerCAmelCase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_snake_case = j
if word[i] == first and i < len(lowerCAmelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_snake_case = tuple(lowerCAmelCase_ )
_snake_case = new_word
if len(lowerCAmelCase_ ) == 1:
break
else:
_snake_case = get_pairs(lowerCAmelCase_ )
_snake_case = ' '.join(lowerCAmelCase_ )
_snake_case = word
return word
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> List[Any]:
_snake_case = []
for token in re.findall(self.pat , lowerCAmelCase_ ):
_snake_case = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCAmelCase_ ).split(' ' ) )
return bpe_tokens
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> str:
return self.encoder.get(lowerCAmelCase_ , self.encoder.get(self.unk_token ) )
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Optional[int]:
return self.decoder.get(lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Union[str, Any]:
_snake_case = ''.join(lowerCAmelCase_ )
_snake_case = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
return text
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> Tuple[str]:
if not os.path.isdir(lowerCAmelCase_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
_snake_case = os.path.join(
lowerCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
_snake_case = os.path.join(
lowerCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(lowerCAmelCase_ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase_ , ensure_ascii=lowerCAmelCase_ ) + '\n' )
_snake_case = 0
with open(lowerCAmelCase_ , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCAmelCase_ : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
' Please check that the tokenizer is not corrupted!' )
_snake_case = token_index
writer.write(' '.join(lowerCAmelCase_ ) + '\n' )
index += 1
return vocab_file, merge_file
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_snake_case = [self.cls_token_id]
_snake_case = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase_ , token_ids_a=lowerCAmelCase_ , already_has_special_tokens=lowerCAmelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase_ )) + [1]
return [1] + ([0] * len(lowerCAmelCase_ )) + [1, 1] + ([0] * len(lowerCAmelCase_ )) + [1]
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> List[int]:
_snake_case = [self.sep_token_id]
_snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=False , **lowerCAmelCase_ ) -> str:
_snake_case = kwargs.pop('add_prefix_space' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCAmelCase_ ) > 0 and not text[0].isspace()):
_snake_case = ' ' + text
return (text, kwargs)
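# Hedged usage sketch: the class above mirrors `BartTokenizer` from
# `transformers`; the equivalent public API is:
#
#   from transformers import BartTokenizer
#   tok = BartTokenizer.from_pretrained("facebook/bart-base")
#   print(tok("Hello world")["input_ids"])  # wrapped in <s> ... </s>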
| 295
| 1
|
def lowerCamelCase__ ( UpperCamelCase__ : int ) -> bool:
'''simple docstring'''
return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
print("""Program to check whether a number is a Perfect number or not...""")
UpperCAmelCase_ = int(input("""Enter number: """).strip())
print(F"{number} is {'' if perfect(number) else 'not '}a Perfect Number.")
| 295
|
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
UpperCAmelCase_ = logging.get_logger(__name__)
logging.set_verbosity_info()
def lowerCamelCase__ ( UpperCamelCase__ : str , UpperCamelCase__ : str ) -> Union[str, Any]:
'''simple docstring'''
if "xprophetnet" in prophetnet_checkpoint_path:
_snake_case = XLMProphetNetForConditionalGenerationOld.from_pretrained(UpperCamelCase__ )
_snake_case , _snake_case = XLMProphetNetForConditionalGeneration.from_pretrained(
UpperCamelCase__ , output_loading_info=UpperCamelCase__ )
else:
_snake_case = ProphetNetForConditionalGenerationOld.from_pretrained(UpperCamelCase__ )
_snake_case , _snake_case = ProphetNetForConditionalGeneration.from_pretrained(
UpperCamelCase__ , output_loading_info=UpperCamelCase__ )
_snake_case = ['key_proj', 'value_proj', 'query_proj']
_snake_case = {
'self_attn': 'ngram_self_attn',
'cross_attn': 'encoder_attn',
'cross_attn_layer_norm': 'encoder_attn_layer_norm',
'feed_forward_layer_norm': 'final_layer_norm',
'feed_forward': '',
'intermediate': 'fc1',
'output': 'fc2',
'key_proj': 'k_proj',
'query_proj': 'q_proj',
'value_proj': 'v_proj',
'word_embeddings': 'embed_tokens',
'embeddings_layer_norm': 'emb_layer_norm',
'relative_pos_embeddings': 'relative_linear',
'ngram_embeddings': 'ngram_input_embed',
'position_embeddings': 'embed_positions',
}
for key in loading_info["missing_keys"]:
_snake_case = key.split('.' )
if attributes[0] == "lm_head":
_snake_case = prophet
_snake_case = prophet_old
else:
_snake_case = prophet.prophetnet
_snake_case = prophet_old.model
_snake_case = False
for attribute in attributes:
if attribute in mapping:
_snake_case = mapping[attribute]
if not hasattr(UpperCamelCase__ , UpperCamelCase__ ) and len(UpperCamelCase__ ) > 0:
_snake_case = attribute
elif hasattr(UpperCamelCase__ , UpperCamelCase__ ):
_snake_case = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
_snake_case = old_model.weight
logger.info(F'''{attribute} is initialized.''' )
_snake_case = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
_snake_case = old_model.bias
logger.info(F'''{attribute} is initialized''' )
_snake_case = True
break
elif attribute in special_keys and hasattr(UpperCamelCase__ , 'in_proj_weight' ):
_snake_case = old_model.in_proj_weight.shape[0] // 3
_snake_case = getattr(UpperCamelCase__ , UpperCamelCase__ )
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
_snake_case = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
_snake_case = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
_snake_case = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
_snake_case = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
_snake_case = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
_snake_case = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
_snake_case = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
_snake_case = nn.Parameter(old_model.embed_positions.weight[:512, :] )
_snake_case = True
break
if attribute.isdigit():
_snake_case = model[int(UpperCamelCase__ )]
_snake_case = old_model[int(UpperCamelCase__ )]
else:
_snake_case = getattr(UpperCamelCase__ , UpperCamelCase__ )
if old_attribute == "":
_snake_case = old_model
else:
if not hasattr(UpperCamelCase__ , UpperCamelCase__ ):
raise ValueError(F'''{old_model} does not have {old_attribute}''' )
_snake_case = getattr(UpperCamelCase__ , UpperCamelCase__ )
if not is_key_init:
raise ValueError(F'''{key} was not correctly initialized!''' )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
prophet.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
UpperCAmelCase_ = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
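# Hedged CLI sketch for the converter above (the script filename is an
# assumption; the flags come from the argparse setup):
#
#   python convert_prophetnet_checkpoint_to_pytorch.py \
#       --prophetnet_checkpoint_path ./prophetnet_old \
#       --pytorch_dump_folder_path ./prophetnet_converted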
| 295
| 1
|
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"""tensor(bool)""": np.bool_,
"""tensor(int8)""": np.inta,
"""tensor(uint8)""": np.uinta,
"""tensor(int16)""": np.intaa,
"""tensor(uint16)""": np.uintaa,
"""tensor(int32)""": np.intaa,
"""tensor(uint32)""": np.uintaa,
"""tensor(int64)""": np.intaa,
"""tensor(uint64)""": np.uintaa,
"""tensor(float16)""": np.floataa,
"""tensor(float)""": np.floataa,
"""tensor(double)""": np.floataa,
}
class UpperCamelCase_ :
def __init__( self , lowerCAmelCase_=None , **lowerCAmelCase_ ) -> int:
logger.info('`diffusers.OnnxRuntimeModel` is experimental and might change in the future.' )
_snake_case = model
_snake_case = kwargs.get('model_save_dir' , lowerCAmelCase_ )
_snake_case = kwargs.get('latest_model_name' , lowerCAmelCase_ )
def __call__( self , **lowerCAmelCase_ ) -> Tuple:
_snake_case = {k: np.array(lowerCAmelCase_ ) for k, v in kwargs.items()}
return self.model.run(lowerCAmelCase_ , lowerCAmelCase_ )
@staticmethod
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None ) -> Tuple:
if provider is None:
logger.info('No onnxruntime provider specified, using CPUExecutionProvider' )
_snake_case = 'CPUExecutionProvider'
return ort.InferenceSession(lowerCAmelCase_ , providers=[provider] , sess_options=lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , **lowerCAmelCase_ ) -> List[Any]:
_snake_case = file_name if file_name is not None else ONNX_WEIGHTS_NAME
_snake_case = self.model_save_dir.joinpath(self.latest_model_name )
_snake_case = Path(lowerCAmelCase_ ).joinpath(lowerCAmelCase_ )
try:
shutil.copyfile(lowerCAmelCase_ , lowerCAmelCase_ )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
_snake_case = self.model_save_dir.joinpath(lowerCAmelCase_ )
if src_path.exists():
_snake_case = Path(lowerCAmelCase_ ).joinpath(lowerCAmelCase_ )
try:
shutil.copyfile(lowerCAmelCase_ , lowerCAmelCase_ )
except shutil.SameFileError:
pass
def lowerCAmelCase ( self , lowerCAmelCase_ , **lowerCAmelCase_ , ) -> Any:
if os.path.isfile(lowerCAmelCase_ ):
logger.error(F'''Provided path ({save_directory}) should be a directory, not a file''' )
return
os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ )
# saving model weights/files
self._save_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
@classmethod
def lowerCAmelCase ( cls , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = False , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , **lowerCAmelCase_ , ) -> Optional[int]:
_snake_case = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(lowerCAmelCase_ ):
_snake_case = OnnxRuntimeModel.load_model(
os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) , provider=lowerCAmelCase_ , sess_options=lowerCAmelCase_ )
_snake_case = Path(lowerCAmelCase_ )
# load model from hub
else:
# download model
_snake_case = hf_hub_download(
repo_id=lowerCAmelCase_ , filename=lowerCAmelCase_ , use_auth_token=lowerCAmelCase_ , revision=lowerCAmelCase_ , cache_dir=lowerCAmelCase_ , force_download=lowerCAmelCase_ , )
_snake_case = Path(lowerCAmelCase_ ).parent
_snake_case = Path(lowerCAmelCase_ ).name
_snake_case = OnnxRuntimeModel.load_model(lowerCAmelCase_ , provider=lowerCAmelCase_ , sess_options=lowerCAmelCase_ )
return cls(model=lowerCAmelCase_ , **lowerCAmelCase_ )
@classmethod
def lowerCAmelCase ( cls , lowerCAmelCase_ , lowerCAmelCase_ = True , lowerCAmelCase_ = None , lowerCAmelCase_ = None , **lowerCAmelCase_ , ) -> str:
_snake_case = None
if len(str(lowerCAmelCase_ ).split('@' ) ) == 2:
_snake_case , _snake_case = model_id.split('@' )
return cls._from_pretrained(
model_id=lowerCAmelCase_ , revision=lowerCAmelCase_ , cache_dir=lowerCAmelCase_ , force_download=lowerCAmelCase_ , use_auth_token=lowerCAmelCase_ , **lowerCAmelCase_ , )
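# Hedged usage sketch for the wrapper class above. `from_pretrained` is the upstream
# diffusers method name that the obfuscated `lowerCAmelCase` defs stand in for, and the
# input name 'sample' is an assumption that must match the exported graph's real inputs.
def run_local_onnx_model(model_dir: str):
    model = OnnxRuntimeModel.from_pretrained(model_dir)  # builds a CPU InferenceSession
    # __call__ converts every keyword argument to an np.ndarray before session.run
    return model(sample=np.zeros((1, 4, 64, 64), dtype=np.float32))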
| 295
|
import random
def lowerCamelCase__ ( UpperCamelCase__ : int , UpperCamelCase__ : float , UpperCamelCase__ : bool = False ) -> dict:
'''simple docstring'''
_snake_case = {i: [] for i in range(UpperCamelCase__ )}
    # if probability is greater than or equal to 1, generate a complete graph
if probability >= 1:
return complete_graph(UpperCamelCase__ )
    # if probability is less than or equal to 0, return a graph with no edges
if probability <= 0:
return graph
    # for each pair of nodes (i, j), add an edge from i to j
    # if the randomly generated number is less than probability
for i in range(UpperCamelCase__ ):
for j in range(i + 1 , UpperCamelCase__ ):
if random.random() < probability:
                graph[i].append(j )
if not directed:
                    # if the graph is undirected, also add the reverse edge from j to i
                    graph[j].append(i )
return graph
def lowerCamelCase__ ( UpperCamelCase__ : int ) -> dict:
'''simple docstring'''
return {
i: [j for j in range(UpperCamelCase__ ) if i != j] for i in range(UpperCamelCase__ )
}
if __name__ == "__main__":
import doctest
doctest.testmod()
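    # Hedged demo under the same __main__ guard. It assumes the upstream names
    # `random_graph` and `complete_graph`, for which the `lowerCamelCase__` defs
    # above stand in; with those names restored this runs as written.
    random.seed(0)  # reproducible sample
    demo = random_graph(5, 0.5)
    # each undirected edge appears in both adjacency lists, so halve the total
    print('edges:', sum(len(adj) for adj in demo.values()) // 2, 'graph:', demo)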
| 295
| 1
|
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def lowerCamelCase__ ( UpperCamelCase__ : int ) -> Optional[int]:
'''simple docstring'''
return getitem, k
def lowerCamelCase__ ( UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : int ) -> Dict:
'''simple docstring'''
return setitem, k, v
def lowerCamelCase__ ( UpperCamelCase__ : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
return delitem, k
def lowerCamelCase__ ( UpperCamelCase__ : Dict , UpperCamelCase__ : str , *UpperCamelCase__ : Union[str, Any] ) -> Dict:
'''simple docstring'''
try:
return fun(UpperCamelCase__ , *UpperCamelCase__ ), None
except Exception as e:
return None, e
UpperCAmelCase_ = (
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
)
UpperCAmelCase_ = [
_set("""key_a""", """val_a"""),
_set("""key_a""", """val_b"""),
]
UpperCAmelCase_ = [
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
_del("""key_a"""),
_del("""key_b"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
]
UpperCAmelCase_ = [
_get("""key_a"""),
_del("""key_a"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
_del("""key_a"""),
_get("""key_a"""),
]
UpperCAmelCase_ = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
UpperCAmelCase_ = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set("""key_a""", """val_b"""),
]
@pytest.mark.parametrize(
'operations' , (
pytest.param(_add_items , id='add items' ),
pytest.param(_overwrite_items , id='overwrite items' ),
pytest.param(_delete_items , id='delete items' ),
pytest.param(_access_absent_items , id='access absent items' ),
pytest.param(_add_with_resize_up , id='add with resize up' ),
pytest.param(_add_with_resize_down , id='add with resize down' ),
) , )
def lowerCamelCase__ ( UpperCamelCase__ : Dict ) -> int:
'''simple docstring'''
_snake_case = HashMap(initial_block_size=4 )
_snake_case = {}
for _, (fun, *args) in enumerate(UpperCamelCase__ ):
_snake_case , _snake_case = _run_operation(UpperCamelCase__ , UpperCamelCase__ , *UpperCamelCase__ )
_snake_case , _snake_case = _run_operation(UpperCamelCase__ , UpperCamelCase__ , *UpperCamelCase__ )
assert my_res == py_res
assert str(UpperCamelCase__ ) == str(UpperCamelCase__ )
assert set(UpperCamelCase__ ) == set(UpperCamelCase__ )
assert len(UpperCamelCase__ ) == len(UpperCamelCase__ )
assert set(my.items() ) == set(py.items() )
def lowerCamelCase__ ( ) -> List[str]:
'''simple docstring'''
    def is_public(name: str ) -> bool:
        return not name.startswith('_' )
    _snake_case = {name for name in dir({} ) if is_public(name )}
    _snake_case = {name for name in dir(HashMap() ) if is_public(name )}
assert dict_public_names > hash_public_names
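# Hedged sketch of the (callable, *args) operation tuples driving the parametrized test
# above; `_set` is the helper name the module-level cases already reference (one of the
# `lowerCamelCase__` defs stands in for it), so this assumes that upstream name.
def _demo_operation_tuple() -> None:
    fun, *args = _set('key_a', 'val_a')  # == (setitem, 'key_a', 'val_a')
    store: dict = {}
    fun(store, *args)                    # setitem(store, 'key_a', 'val_a')
    assert store == {'key_a': 'val_a'}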
| 295
|
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class UpperCamelCase_ :
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ = 13 , lowerCAmelCase_ = 64 , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 3 , lowerCAmelCase_ = 3 , lowerCAmelCase_ = True , lowerCAmelCase_ = True , lowerCAmelCase_ = 128 , lowerCAmelCase_=[16, 32, 64, 128] , lowerCAmelCase_ = 7 , lowerCAmelCase_ = 4 , lowerCAmelCase_ = 37 , lowerCAmelCase_ = "gelu" , lowerCAmelCase_ = 0.1 , lowerCAmelCase_ = 0.1 , lowerCAmelCase_ = 10 , lowerCAmelCase_ = 0.02 , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 1 , lowerCAmelCase_ = 128 , lowerCAmelCase_ = [2, 2, 2, 2] , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 2 , ) -> Dict:
_snake_case = parent
_snake_case = batch_size
_snake_case = image_size
_snake_case = patch_size
_snake_case = num_channels
_snake_case = is_training
_snake_case = use_labels
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = type_sequence_label_size
_snake_case = initializer_range
_snake_case = encoder_stride
_snake_case = num_attention_outputs
_snake_case = embed_dim
_snake_case = embed_dim + 1
_snake_case = resolution
_snake_case = depths
_snake_case = hidden_sizes
_snake_case = dim
_snake_case = mlp_expansion_ratio
def lowerCAmelCase ( self ) -> Optional[Any]:
_snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case = None
if self.use_labels:
_snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_snake_case = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase ( self ) -> Tuple:
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]:
_snake_case = TFEfficientFormerModel(config=lowerCAmelCase_ )
_snake_case = model(lowerCAmelCase_ , training=lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Any:
_snake_case = self.type_sequence_label_size
_snake_case = TFEfficientFormerForImageClassification(lowerCAmelCase_ )
_snake_case = model(lowerCAmelCase_ , labels=lowerCAmelCase_ , training=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_snake_case = 1
_snake_case = TFEfficientFormerForImageClassification(lowerCAmelCase_ )
_snake_case = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_snake_case = model(lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCAmelCase ( self ) -> List[str]:
_snake_case = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case = config_and_inputs
_snake_case = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class UpperCamelCase_ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
lowerCAmelCase_ = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
lowerCAmelCase_ = (
{
'''feature-extraction''': TFEfficientFormerModel,
'''image-classification''': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def lowerCAmelCase ( self ) -> str:
_snake_case = TFEfficientFormerModelTester(self )
_snake_case = ConfigTester(
self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ , hidden_size=37 )
def lowerCAmelCase ( self ) -> str:
self.config_tester.run_common_tests()
@unittest.skip(reason='EfficientFormer does not use inputs_embeds' )
def lowerCAmelCase ( self ) -> int:
pass
@unittest.skip(reason='EfficientFormer does not support input and output embeddings' )
def lowerCAmelCase ( self ) -> Optional[Any]:
pass
def lowerCAmelCase ( self ) -> str:
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(lowerCAmelCase_ )
_snake_case = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case = [*signature.parameters.keys()]
_snake_case = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> Optional[Any]:
def check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case = model_class(lowerCAmelCase_ )
_snake_case = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) , training=lowerCAmelCase_ )
_snake_case = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_snake_case = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ )
if hasattr(self.model_tester , 'encoder_seq_length' ):
_snake_case = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , 'chunk_length' ) and self.model_tester.chunk_length > 1:
_snake_case = seq_length * self.model_tester.chunk_length
else:
_snake_case = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
_snake_case = outputs.decoder_hidden_states
                self.assertIsInstance(lowerCAmelCase_ , (list, tuple) )
self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ )
_snake_case = getattr(self.model_tester , 'seq_length' , lowerCAmelCase_ )
_snake_case = getattr(self.model_tester , 'decoder_seq_length' , lowerCAmelCase_ )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_snake_case = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False ) -> List[Any]:
_snake_case = super()._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def lowerCAmelCase ( self ) -> Dict:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
@unittest.skip(reason='EfficientFormer does not implement masked image modeling yet' )
def lowerCAmelCase ( self ) -> Dict:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> List[Any]:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase_ )
@slow
def lowerCAmelCase ( self ) -> str:
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case = TFEfficientFormerModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> List[str]:
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case = True
_snake_case = getattr(self.model_tester , 'seq_length' , lowerCAmelCase_ )
_snake_case = getattr(self.model_tester , 'encoder_seq_length' , lowerCAmelCase_ )
_snake_case = getattr(self.model_tester , 'key_length' , lowerCAmelCase_ )
_snake_case = getattr(self.model_tester , 'chunk_length' , lowerCAmelCase_ )
if chunk_length is not None and hasattr(self.model_tester , 'num_hashes' ):
_snake_case = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
_snake_case = True
_snake_case = False
_snake_case = True
_snake_case = model_class(lowerCAmelCase_ )
_snake_case = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) , training=lowerCAmelCase_ )
_snake_case = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowerCAmelCase_ ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_snake_case = True
_snake_case = model_class(lowerCAmelCase_ )
_snake_case = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) , training=lowerCAmelCase_ )
_snake_case = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowerCAmelCase_ ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def lowerCAmelCase ( self ) -> Dict:
# We use a simplified version of this test for EfficientFormer because it requires training=False
# and Keras refuses to let us force that during functional construction
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
_snake_case = model_class(lowerCAmelCase_ )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
_snake_case = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=lowerCAmelCase_ )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
_snake_case = model(lowerCAmelCase_ )
self.assertTrue(outputs_dict is not None )
def lowerCamelCase__ ( ) -> List[str]:
'''simple docstring'''
_snake_case = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class UpperCamelCase_ ( unittest.TestCase ):
@cached_property
def lowerCAmelCase ( self ) -> Dict:
return (
EfficientFormerImageProcessor.from_pretrained('snap-research/efficientformer-l1-300' )
if is_vision_available()
else None
)
@slow
def lowerCAmelCase ( self ) -> Union[str, Any]:
_snake_case = TFEfficientFormerForImageClassification.from_pretrained('snap-research/efficientformer-l1-300' )
_snake_case = self.default_image_processor
_snake_case = prepare_img()
_snake_case = image_processor(images=lowerCAmelCase_ , return_tensors='tf' )
# forward pass
_snake_case = model(**lowerCAmelCase_ , training=lowerCAmelCase_ )
# verify the logits
_snake_case = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase_ )
_snake_case = tf.constant([-0.05_55, 0.48_25, -0.08_52] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1E-4 ) )
@slow
def lowerCAmelCase ( self ) -> str:
_snake_case = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
'snap-research/efficientformer-l1-300' )
_snake_case = self.default_image_processor
_snake_case = prepare_img()
_snake_case = image_processor(images=lowerCAmelCase_ , return_tensors='tf' )
# forward pass
_snake_case = model(**lowerCAmelCase_ , training=lowerCAmelCase_ )
# verify the logits
_snake_case = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase_ )
_snake_case = tf.constant([-0.13_12, 0.43_53, -1.04_99] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1E-4 ) )
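# Hedged end-to-end sketch mirroring the integration tests above; the checkpoint id is
# taken from those tests, and `prepare_img` is the upstream name of the fixture helper
# (its `lowerCamelCase__` def above stands in for it).
def classify_fixture_image() -> int:
    image = prepare_img()
    processor = EfficientFormerImageProcessor.from_pretrained('snap-research/efficientformer-l1-300')
    model = TFEfficientFormerForImageClassification.from_pretrained('snap-research/efficientformer-l1-300')
    inputs = processor(images=image, return_tensors='tf')
    logits = model(**inputs, training=False).logits  # (1, 1000) ImageNet-1k logits
    return int(tf.argmax(logits, axis=-1)[0])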
| 295
| 1
|
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
UpperCAmelCase_ = logging.get_logger(__name__)
class UpperCamelCase_ ( _lowerCamelCase ):
def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> None:
        warnings.warn(
            'The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use DeiTImageProcessor instead.' , FutureWarning , )
super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ )
| 295
|
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCamelCase_ ( _lowerCamelCase , unittest.TestCase ):
lowerCAmelCase_ = LEDTokenizer
lowerCAmelCase_ = LEDTokenizerFast
lowerCAmelCase_ = True
def lowerCAmelCase ( self ) -> List[str]:
super().setUp()
_snake_case = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
_snake_case = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
_snake_case = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
_snake_case = {'unk_token': '<unk>'}
_snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
_snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(lowerCAmelCase_ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(lowerCAmelCase_ ) )
def lowerCAmelCase ( self , **lowerCAmelCase_ ) -> List[str]:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def lowerCAmelCase ( self , **lowerCAmelCase_ ) -> str:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Union[str, Any]:
return "lower newer", "lower newer"
@cached_property
def lowerCAmelCase ( self ) -> Optional[Any]:
return LEDTokenizer.from_pretrained('allenai/led-base-16384' )
@cached_property
def lowerCAmelCase ( self ) -> Union[str, Any]:
return LEDTokenizerFast.from_pretrained('allenai/led-base-16384' )
@require_torch
def lowerCAmelCase ( self ) -> Union[str, Any]:
_snake_case = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_snake_case = [0, 250, 251, 1_7818, 13, 3_9186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_snake_case = tokenizer(lowerCAmelCase_ , max_length=len(lowerCAmelCase_ ) , padding=lowerCAmelCase_ , return_tensors='pt' )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
_snake_case = batch.input_ids.tolist()[0]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
@require_torch
def lowerCAmelCase ( self ) -> Optional[Any]:
_snake_case = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_snake_case = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors='pt' )
self.assertIn('input_ids' , lowerCAmelCase_ )
self.assertIn('attention_mask' , lowerCAmelCase_ )
self.assertNotIn('labels' , lowerCAmelCase_ )
self.assertNotIn('decoder_attention_mask' , lowerCAmelCase_ )
@require_torch
def lowerCAmelCase ( self ) -> Optional[int]:
_snake_case = [
'Summary of the text.',
'Another summary.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_snake_case = tokenizer(text_target=lowerCAmelCase_ , max_length=32 , padding='max_length' , return_tensors='pt' )
self.assertEqual(32 , targets['input_ids'].shape[1] )
@require_torch
def lowerCAmelCase ( self ) -> List[str]:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_snake_case = tokenizer(
['I am a small frog' * 1024, 'I am a small frog'] , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , return_tensors='pt' )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual(batch.input_ids.shape , (2, 5122) )
@require_torch
def lowerCAmelCase ( self ) -> Union[str, Any]:
_snake_case = ['A long paragraph for summarization.']
_snake_case = [
'Summary of the text.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_snake_case = tokenizer(lowerCAmelCase_ , return_tensors='pt' )
_snake_case = tokenizer(text_target=lowerCAmelCase_ , return_tensors='pt' )
_snake_case = inputs['input_ids']
_snake_case = targets['input_ids']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def lowerCAmelCase ( self ) -> List[str]:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_snake_case = ['Summary of the text.', 'Another summary.']
_snake_case = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
_snake_case = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ )
            _snake_case = [[0] * len(x ) for x in encoded_output['input_ids']]
_snake_case = tokenizer.pad(lowerCAmelCase_ )
self.assertSequenceEqual(outputs['global_attention_mask'] , lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> Tuple:
pass
def lowerCAmelCase ( self ) -> str:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_snake_case = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
_snake_case = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
_snake_case = 'A, <mask> AllenNLP sentence.'
_snake_case = tokenizer_r.encode_plus(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ )
_snake_case = tokenizer_p.encode_plus(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ )
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
_snake_case = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
_snake_case = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
lowerCAmelCase_ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
lowerCAmelCase_ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
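# Hedged round-trip sketch for the slow tokenizer exercised above; the checkpoint name
# comes from the cached properties of the test class, the sample sentence is arbitrary.
def led_roundtrip(text: str = 'A long paragraph for summarization.') -> str:
    tokenizer = LEDTokenizer.from_pretrained('allenai/led-base-16384')
    ids = tokenizer(text).input_ids  # bos ... eos, per the assertions above
    return tokenizer.decode(ids, skip_special_tokens=True)  # recovers the input text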
| 295
| 1
|
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
UpperCAmelCase_ = {
"""vocab_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
},
}
UpperCAmelCase_ = {
"""allenai/longformer-base-4096""": 4096,
"""allenai/longformer-large-4096""": 4096,
"""allenai/longformer-large-4096-finetuned-triviaqa""": 4096,
"""allenai/longformer-base-4096-extra.pos.embd.only""": 4096,
"""allenai/longformer-large-4096-extra.pos.embd.only""": 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def lowerCamelCase__ ( ) -> Dict:
'''simple docstring'''
_snake_case = (
list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
)
_snake_case = bs[:]
_snake_case = 0
for b in range(2**8 ):
if b not in bs:
            bs.append(b )
cs.append(2**8 + n )
n += 1
    _snake_case = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
def lowerCamelCase__ ( UpperCamelCase__ : str ) -> Any:
'''simple docstring'''
_snake_case = set()
_snake_case = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_snake_case = char
return pairs
class UpperCamelCase_ ( _lowerCamelCase ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ['''input_ids''', '''attention_mask''']
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="replace" , lowerCAmelCase_="<s>" , lowerCAmelCase_="</s>" , lowerCAmelCase_="</s>" , lowerCAmelCase_="<s>" , lowerCAmelCase_="<unk>" , lowerCAmelCase_="<pad>" , lowerCAmelCase_="<mask>" , lowerCAmelCase_=False , **lowerCAmelCase_ , ) -> Optional[int]:
_snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else bos_token
_snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else eos_token
_snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else sep_token
_snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else cls_token
_snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else unk_token
_snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else pad_token
        # Mask token behaves like a normal word, i.e. it includes the space before it
_snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else mask_token
super().__init__(
errors=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ , **lowerCAmelCase_ , )
with open(lowerCAmelCase_ , encoding='utf-8' ) as vocab_handle:
_snake_case = json.load(lowerCAmelCase_ )
_snake_case = {v: k for k, v in self.encoder.items()}
_snake_case = errors # how to handle errors in decoding
_snake_case = bytes_to_unicode()
_snake_case = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCAmelCase_ , encoding='utf-8' ) as merges_handle:
_snake_case = merges_handle.read().split('\n' )[1:-1]
_snake_case = [tuple(merge.split() ) for merge in bpe_merges]
_snake_case = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
_snake_case = {}
_snake_case = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
_snake_case = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
def lowerCAmelCase ( self ) -> int:
return len(self.encoder )
def lowerCAmelCase ( self ) -> List[str]:
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> List[str]:
if token in self.cache:
return self.cache[token]
_snake_case = tuple(lowerCAmelCase_ )
_snake_case = get_pairs(lowerCAmelCase_ )
if not pairs:
return token
while True:
_snake_case = min(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : self.bpe_ranks.get(lowerCAmelCase_ , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
_snake_case , _snake_case = bigram
_snake_case = []
_snake_case = 0
while i < len(lowerCAmelCase_ ):
try:
_snake_case = word.index(lowerCAmelCase_ , lowerCAmelCase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_snake_case = j
if word[i] == first and i < len(lowerCAmelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_snake_case = tuple(lowerCAmelCase_ )
_snake_case = new_word
if len(lowerCAmelCase_ ) == 1:
break
else:
_snake_case = get_pairs(lowerCAmelCase_ )
_snake_case = ' '.join(lowerCAmelCase_ )
_snake_case = word
return word
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Tuple:
_snake_case = []
for token in re.findall(self.pat , lowerCAmelCase_ ):
_snake_case = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(' ' ) )
return bpe_tokens
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> List[Any]:
return self.encoder.get(lowerCAmelCase_ , self.encoder.get(self.unk_token ) )
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Union[str, Any]:
return self.decoder.get(lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> List[str]:
_snake_case = ''.join(lowerCAmelCase_ )
_snake_case = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
return text
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> Tuple[str]:
if not os.path.isdir(lowerCAmelCase_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
_snake_case = os.path.join(
lowerCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
_snake_case = os.path.join(
lowerCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(lowerCAmelCase_ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase_ , ensure_ascii=lowerCAmelCase_ ) + '\n' )
_snake_case = 0
with open(lowerCAmelCase_ , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
' Please check that the tokenizer is not corrupted!' )
_snake_case = token_index
writer.write(' '.join(lowerCAmelCase_ ) + '\n' )
index += 1
return vocab_file, merge_file
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_snake_case = [self.cls_token_id]
_snake_case = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase_ , token_ids_a=lowerCAmelCase_ , already_has_special_tokens=lowerCAmelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase_ )) + [1]
return [1] + ([0] * len(lowerCAmelCase_ )) + [1, 1] + ([0] * len(lowerCAmelCase_ )) + [1]
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> List[int]:
_snake_case = [self.sep_token_id]
_snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=False , **lowerCAmelCase_ ) -> List[Any]:
_snake_case = kwargs.pop('add_prefix_space' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCAmelCase_ ) > 0 and not text[0].isspace()):
_snake_case = ' ' + text
return (text, kwargs)
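# Hedged demo of the two module-level helpers above; `bytes_to_unicode` and `get_pairs`
# are the upstream names the class body already calls (the `lowerCamelCase__` defs
# stand in for them), so this assumes those names are available.
def _demo_bpe_helpers() -> None:
    byte_map = bytes_to_unicode()
    assert len(byte_map) == 256 and byte_map[ord('A')] == 'A'  # printable bytes map to themselves
    assert get_pairs('low') == {('l', 'o'), ('o', 'w')}        # adjacent symbol pairs used by BPE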
| 295
|
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class UpperCamelCase_ ( _lowerCamelCase , unittest.TestCase ):
lowerCAmelCase_ = BertTokenizer
lowerCAmelCase_ = BertTokenizerFast
lowerCAmelCase_ = True
lowerCAmelCase_ = True
lowerCAmelCase_ = filter_non_english
def lowerCAmelCase ( self ) -> Optional[int]:
super().setUp()
_snake_case = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
_snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Optional[Any]:
_snake_case = 'UNwant\u00E9d,running'
_snake_case = 'unwanted, running'
return input_text, output_text
def lowerCAmelCase ( self ) -> List[Any]:
_snake_case = self.tokenizer_class(self.vocab_file )
_snake_case = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(lowerCAmelCase_ , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [9, 6, 7, 12, 10, 11] )
def lowerCAmelCase ( self ) -> Tuple:
if not self.test_rust_tokenizer:
return
_snake_case = self.get_tokenizer()
_snake_case = self.get_rust_tokenizer()
_snake_case = 'UNwant\u00E9d,running'
_snake_case = tokenizer.tokenize(lowerCAmelCase_ )
_snake_case = rust_tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
_snake_case = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = self.get_rust_tokenizer()
_snake_case = tokenizer.encode(lowerCAmelCase_ )
_snake_case = rust_tokenizer.encode(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
# With lower casing
_snake_case = self.get_tokenizer(do_lower_case=lowerCAmelCase_ )
_snake_case = self.get_rust_tokenizer(do_lower_case=lowerCAmelCase_ )
_snake_case = 'UNwant\u00E9d,running'
_snake_case = tokenizer.tokenize(lowerCAmelCase_ )
_snake_case = rust_tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
_snake_case = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = self.get_rust_tokenizer()
_snake_case = tokenizer.encode(lowerCAmelCase_ )
_snake_case = rust_tokenizer.encode(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> List[str]:
_snake_case = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def lowerCAmelCase ( self ) -> Optional[Any]:
_snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def lowerCAmelCase ( self ) -> List[Any]:
_snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def lowerCAmelCase ( self ) -> Any:
_snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def lowerCAmelCase ( self ) -> List[str]:
_snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def lowerCAmelCase ( self ) -> Tuple:
_snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def lowerCAmelCase ( self ) -> Union[str, Any]:
_snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def lowerCAmelCase ( self ) -> Tuple:
_snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def lowerCAmelCase ( self ) -> Dict:
_snake_case = BasicTokenizer(do_lower_case=lowerCAmelCase_ , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def lowerCAmelCase ( self ) -> Union[str, Any]:
_snake_case = BasicTokenizer()
_snake_case = 'a\n\'ll !!to?\'d of, can\'t.'
_snake_case = ['a', '\'', 'll', '!', '!', 'to', '?', '\'', 'd', 'of', ',', 'can', '\'', 't', '.']
self.assertListEqual(tokenizer.tokenize(lowerCAmelCase_ ) , lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> Union[str, Any]:
_snake_case = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
_snake_case = {}
for i, token in enumerate(lowerCAmelCase_ ):
_snake_case = i
_snake_case = WordpieceTokenizer(vocab=lowerCAmelCase_ , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
def lowerCAmelCase ( self ) -> Tuple:
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def lowerCAmelCase ( self ) -> Dict:
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def lowerCAmelCase ( self ) -> int:
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def lowerCAmelCase ( self ) -> Tuple:
_snake_case = self.get_tokenizer()
_snake_case = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
self.assertListEqual(
            [rust_tokenizer.tokenize(t ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
@slow
def lowerCAmelCase ( self ) -> Optional[Any]:
_snake_case = self.tokenizer_class.from_pretrained('bert-base-uncased' )
_snake_case = tokenizer.encode('sequence builders' , add_special_tokens=lowerCAmelCase_ )
_snake_case = tokenizer.encode('multi-sequence build' , add_special_tokens=lowerCAmelCase_ )
_snake_case = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ )
_snake_case = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ , lowerCAmelCase_ )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def lowerCAmelCase ( self ) -> Union[str, Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_snake_case = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
_snake_case = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
_snake_case = tokenizer_r.encode_plus(
lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , )
_snake_case = tokenizer_r.do_lower_case if hasattr(lowerCAmelCase_ , 'do_lower_case' ) else False
_snake_case = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
def lowerCAmelCase ( self ) -> str:
_snake_case = ['的', '人', '有']
_snake_case = ''.join(lowerCAmelCase_ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_snake_case = True
_snake_case = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
_snake_case = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
_snake_case = tokenizer_p.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
_snake_case = tokenizer_r.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
_snake_case = tokenizer_r.convert_ids_to_tokens(lowerCAmelCase_ )
_snake_case = tokenizer_p.convert_ids_to_tokens(lowerCAmelCase_ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = False
_snake_case = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
_snake_case = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
_snake_case = tokenizer_r.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
_snake_case = tokenizer_p.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
_snake_case = tokenizer_r.convert_ids_to_tokens(lowerCAmelCase_ )
_snake_case = tokenizer_p.convert_ids_to_tokens(lowerCAmelCase_ )
# it is expected that only the first Chinese character is not preceded by "##".
_snake_case = [
F'''##{token}''' if idx != 0 else token for idx, token in enumerate(lowerCAmelCase_ )
]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
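# Hedged sketch of WordpieceTokenizer's greedy longest-match-first behaviour on a toy
# vocabulary; it mirrors the dedicated unit test above and uses only names imported
# at the top of this file.
def _demo_wordpiece() -> None:
    toy_vocab = {token: idx for idx, token in enumerate(['[UNK]', 'un', '##aff', '##able'])}
    wordpiece = WordpieceTokenizer(vocab=toy_vocab, unk_token='[UNK]')
    assert wordpiece.tokenize('unaffable') == ['un', '##aff', '##able']
    assert wordpiece.tokenize('unaffableX') == ['[UNK]']  # any unmatched tail rejects the whole word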
| 295
| 1
|
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
UpperCAmelCase_ = pytest.mark.integration
@pytest.mark.parametrize('path' , ['paws', 'csv'] )
def lowerCamelCase__ ( UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
inspect_dataset(UpperCamelCase__ , UpperCamelCase__ )
_snake_case = path + '.py'
assert script_name in os.listdir(UpperCamelCase__ )
assert "__pycache__" not in os.listdir(UpperCamelCase__ )
@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.parametrize('path' , ['accuracy'] )
def lowerCamelCase__ ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str ) -> Tuple:
'''simple docstring'''
inspect_metric(UpperCamelCase__ , UpperCamelCase__ )
_snake_case = path + '.py'
assert script_name in os.listdir(UpperCamelCase__ )
assert "__pycache__" not in os.listdir(UpperCamelCase__ )
@pytest.mark.parametrize(
'path, config_name, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def lowerCamelCase__ ( UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : int ) -> List[str]:
'''simple docstring'''
_snake_case = get_dataset_config_info(UpperCamelCase__ , config_name=UpperCamelCase__ )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def lowerCamelCase__ ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any , UpperCamelCase__ : str ) -> Optional[int]:
'''simple docstring'''
with pytest.raises(UpperCamelCase__ ):
get_dataset_config_info(UpperCamelCase__ , config_name=UpperCamelCase__ )
@pytest.mark.parametrize(
'path, expected' , [
('squad', 'plain_text'),
('acronym_identification', 'default'),
('lhoestq/squad', 'plain_text'),
('lhoestq/test', 'default'),
('lhoestq/demo1', 'lhoestq--demo1'),
('dalle-mini/wit', 'dalle-mini--wit'),
] , )
def lowerCamelCase__ ( UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any] ) -> List[Any]:
'''simple docstring'''
_snake_case = get_dataset_config_names(UpperCamelCase__ )
assert expected in config_names
@pytest.mark.parametrize(
'path, expected_configs, expected_splits_in_first_config' , [
('squad', ['plain_text'], ['train', 'validation']),
('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
] , )
def lowerCamelCase__ ( UpperCamelCase__ : int , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[Any] ) -> Any:
'''simple docstring'''
_snake_case = get_dataset_infos(UpperCamelCase__ )
assert list(infos.keys() ) == expected_configs
_snake_case = expected_configs[0]
assert expected_config in infos
_snake_case = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'path, expected_config, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def lowerCamelCase__ ( UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any ) -> List[Any]:
'''simple docstring'''
_snake_case = get_dataset_infos(UpperCamelCase__ )
assert expected_config in infos
_snake_case = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def lowerCamelCase__ ( UpperCamelCase__ : Any , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple ) -> int:
'''simple docstring'''
with pytest.raises(UpperCamelCase__ ):
get_dataset_split_names(UpperCamelCase__ , config_name=UpperCamelCase__ )
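# Hedged usage sketch for the inspection helpers exercised above; the 'squad' dataset id
# and its 'plain_text' config come from the parametrized cases.
def describe_squad():
    configs = get_dataset_config_names('squad')                      # ['plain_text']
    splits = get_dataset_split_names('squad', config_name=configs[0])
    return configs, splits                                           # (..., ['train', 'validation'])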
| 295
|
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
UpperCAmelCase_ = logging.get_logger(__name__)
class UpperCamelCase_ ( _lowerCamelCase ):
def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> None:
        warnings.warn(
            'The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use FlavaImageProcessor instead.' , FutureWarning , )
super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ )
| 295
| 1
|
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
UpperCAmelCase_ = [
"""cross_validation.py""",
"""gradient_accumulation.py""",
"""local_sgd.py""",
"""multi_process_metrics.py""",
"""memory.py""",
"""automatic_gradient_accumulation.py""",
"""fsdp_with_peak_mem_tracking.py""",
"""deepspeed_with_config_support.py""",
"""megatron_lm_gpt_pretraining.py""",
]
class UpperCamelCase_ ( unittest.TestCase ):
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None ) -> List[str]:
_snake_case = None
_snake_case = os.path.abspath(os.path.join('examples' , 'by_feature' ) )
_snake_case = os.path.abspath('examples' )
for item in os.listdir(lowerCAmelCase_ ):
if item not in EXCLUDE_EXAMPLES:
_snake_case = os.path.join(lowerCAmelCase_ , lowerCAmelCase_ )
if os.path.isfile(lowerCAmelCase_ ) and ".py" in item_path:
with self.subTest(
tested_script=lowerCAmelCase_ , feature_script=lowerCAmelCase_ , tested_section='main()' if parser_only else 'training_function()' , ):
_snake_case = compare_against_test(
os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = '\n'.join(lowerCAmelCase_ )
if special_strings is not None:
for string in special_strings:
_snake_case = diff.replace(lowerCAmelCase_ , '' )
self.assertEqual(lowerCAmelCase_ , '' )
def lowerCAmelCase ( self ) -> List[str]:
self.one_complete_example('complete_nlp_example.py' , lowerCAmelCase_ )
self.one_complete_example('complete_nlp_example.py' , lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> str:
_snake_case = os.path.abspath(os.path.join('examples' , 'cv_example.py' ) )
_snake_case = [
' ' * 16 + '{\n\n',
' ' * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
' ' * 20 + '"f1": eval_metric["f1"],\n\n',
' ' * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
' ' * 20 + '"epoch": epoch,\n\n',
' ' * 16 + '},\n\n',
' ' * 16 + 'step=epoch,\n',
' ' * 12,
' ' * 8 + 'for step, batch in enumerate(active_dataloader):\n',
]
self.one_complete_example('complete_cv_example.py' , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
self.one_complete_example('complete_cv_example.py' , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
@mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''1'''} )
class UpperCamelCase_ ( _lowerCamelCase ):
lowerCAmelCase_ = False
@classmethod
def lowerCAmelCase ( cls ) -> Optional[Any]:
super().setUpClass()
_snake_case = tempfile.mkdtemp()
_snake_case = os.path.join(cls._tmpdir , 'default_config.yml' )
write_basic_config(save_location=cls.configPath )
_snake_case = ['accelerate', 'launch', '--config_file', cls.configPath]
@classmethod
def lowerCAmelCase ( cls ) -> Optional[int]:
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def lowerCAmelCase ( self ) -> Optional[int]:
_snake_case = F'''
examples/by_feature/checkpointing.py
--checkpointing_steps epoch
--output_dir {self.tmpdir}
'''.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , 'epoch_0' ) ) )
def lowerCAmelCase ( self ) -> int:
_snake_case = F'''
examples/by_feature/checkpointing.py
--checkpointing_steps 1
--output_dir {self.tmpdir}
'''.split()
_snake_case = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , 'step_2' ) ) )
def lowerCAmelCase ( self ) -> Union[str, Any]:
_snake_case = F'''
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )}
'''.split()
_snake_case = run_command(self._launch_args + testargs , return_stdout=lowerCAmelCase_ )
self.assertNotIn('epoch 0:' , lowerCAmelCase_ )
self.assertIn('epoch 1:' , lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> Dict:
_snake_case = F'''
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )}
'''.split()
_snake_case = run_command(self._launch_args + testargs , return_stdout=lowerCAmelCase_ )
if torch.cuda.is_available():
_snake_case = torch.cuda.device_count()
else:
_snake_case = 1
if num_processes > 1:
self.assertNotIn('epoch 0:' , lowerCAmelCase_ )
self.assertIn('epoch 1:' , lowerCAmelCase_ )
else:
self.assertIn('epoch 0:' , lowerCAmelCase_ )
self.assertIn('epoch 1:' , lowerCAmelCase_ )
@slow
def lowerCAmelCase ( self ) -> Optional[int]:
_snake_case = '\n examples/by_feature/cross_validation.py\n --num_folds 2\n '.split()
with mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '0'} ):
_snake_case = run_command(self._launch_args + testargs , return_stdout=lowerCAmelCase_ )
_snake_case = re.findall('({.+})' , lowerCAmelCase_ )
_snake_case = [r for r in results if 'accuracy' in r][-1]
_snake_case = ast.literal_eval(lowerCAmelCase_ )
self.assertGreaterEqual(results['accuracy'] , 0.75 )
def lowerCAmelCase ( self ) -> Optional[int]:
_snake_case = ['examples/by_feature/multi_process_metrics.py']
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def lowerCAmelCase ( self ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdir:
_snake_case = F'''
examples/by_feature/tracking.py
--with_tracking
--project_dir {tmpdir}
'''.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase_ , 'tracking' ) ) )
def lowerCAmelCase ( self ) -> int:
_snake_case = ['examples/by_feature/gradient_accumulation.py']
run_command(self._launch_args + testargs )
def lowerCAmelCase ( self ) -> List[str]:
_snake_case = ['examples/by_feature/local_sgd.py']
run_command(self._launch_args + testargs )
| 295
|
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
UpperCAmelCase_ = [
{"""dataset""": """wikipedia""", """config_name""": """20220301.de"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.en"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.fr"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.frr"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.it"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.simple"""},
{"""dataset""": """snli""", """config_name""": """plain_text"""},
{"""dataset""": """eli5""", """config_name""": """LFQA_reddit"""},
{"""dataset""": """wiki40b""", """config_name""": """en"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.compressed"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.no_index"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.multiset.no_index"""},
{"""dataset""": """natural_questions""", """config_name""": """default"""},
]
def lowerCamelCase__ ( UpperCamelCase__ : Dict=True ) -> Dict:
'''simple docstring'''
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
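# Illustration (a hedged note, not in the original): with the DATASETS_ON_HF_GCP
# entries above, with_config=True yields parameter dicts such as
#   {"testcase_name": "wikipedia/20220301.de", "dataset": "wikipedia", "config_name": "20220301.de"}
# while with_config=False deduplicates to one entry per dataset name, e.g.
#   {"testcase_name": "snli", "dataset": "snli"}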
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=_lowerCamelCase ) )
class UpperCamelCase_ ( _lowerCamelCase ):
lowerCAmelCase_ = None
lowerCAmelCase_ = None
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Union[str, Any]:
with TemporaryDirectory() as tmp_dir:
_snake_case = dataset_module_factory(lowerCAmelCase_ , cache_dir=lowerCAmelCase_ )
_snake_case = import_main_class(dataset_module.module_path , dataset=lowerCAmelCase_ )
_snake_case = builder_cls(
cache_dir=lowerCAmelCase_ , config_name=lowerCAmelCase_ , hash=dataset_module.hash , )
_snake_case = '/'.join(
[
HF_GCP_BASE_URL,
builder_instance._relative_data_dir(with_hash=lowerCAmelCase_ ).replace(os.sep , '/' ),
config.DATASET_INFO_FILENAME,
] )
_snake_case = cached_path(lowerCAmelCase_ , cache_dir=lowerCAmelCase_ )
self.assertTrue(os.path.exists(lowerCAmelCase_ ) )
@pytest.mark.integration
def lowerCamelCase__ ( UpperCamelCase__ : Any ) -> Tuple:
'''simple docstring'''
_snake_case = tmp_path_factory.mktemp('test_hf_gcp' ) / 'test_wikipedia_simple'
_snake_case = dataset_module_factory('wikipedia' , cache_dir=UpperCamelCase__ )
_snake_case = import_main_class(dataset_module.module_path )
_snake_case = builder_cls(
cache_dir=UpperCamelCase__ , config_name='20220301.frr' , hash=dataset_module.hash , )
# use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
builder_instance.download_and_prepare()
_snake_case = builder_instance.as_dataset()
assert ds
@pytest.mark.integration
def lowerCamelCase__ ( UpperCamelCase__ : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
_snake_case = dataset_module_factory('wikipedia' , cache_dir=UpperCamelCase__ )
_snake_case = import_main_class(dataset_module.module_path , dataset=UpperCamelCase__ )
_snake_case = builder_cls(
cache_dir=UpperCamelCase__ , config_name='20220301.frr' , hash=dataset_module.hash , )
_snake_case = builder_instance.as_streaming_dataset()
assert ds
assert isinstance(UpperCamelCase__ , UpperCamelCase__ )
assert "train" in ds
assert isinstance(ds['train'] , UpperCamelCase__ )
assert next(iter(ds['train'] ) )
| 295
| 1
|
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
UpperCAmelCase_ = {
"""/attention/""": """/0/SelfAttention/""",
"""/self_attention/""": """/0/SelfAttention/""",
"""/encoder_decoder_attention/""": """/1/EncDecAttention/""",
"""value""": """v""",
"""query""": """q""",
"""key""": """k""",
"""out""": """o""",
"""pre_self_attention_layer_norm""": """0/layer_norm""",
"""pre_cross_attention_layer_norm""": """1/layer_norm""",
"""pre_attention_layer_norm""": """0/layer_norm""", # previously 1, but seems wrong
"""token_embedder""": """shared""",
"""encoder_norm""": """final_layer_norm""",
"""decoder_norm""": """final_layer_norm""",
"""relpos_bias/rel_embedding""": """block/0/layer/0/SelfAttention/relative_attention_bias/weight""",
"""router/router_weights/w/""": """router/classifier/""",
"""roer/roer_weights/w/""": """router/classifier/""",
"""logits_dense""": """lm_head""",
}
def lowerCamelCase__ ( UpperCamelCase__ : List[str] ) -> List[str]:
'''simple docstring'''
_snake_case = list(s_dict.keys() )
for key in keys:
_snake_case = R'.*/layers_(\d+)'
_snake_case = key
if re.match(UpperCamelCase__ , UpperCamelCase__ ):
_snake_case = re.sub(R'layers_(\d+)' , R'block/\1/layer' , UpperCamelCase__ )
_snake_case = R'(encoder|decoder)\/'
if re.match(UpperCamelCase__ , UpperCamelCase__ ):
_snake_case = re.match(UpperCamelCase__ , UpperCamelCase__ ).groups()
if groups[0] == "encoder":
_snake_case = re.sub(R'/mlp/' , R'/1/mlp/' , UpperCamelCase__ )
_snake_case = re.sub(R'/pre_mlp_layer_norm/' , R'/1/layer_norm/' , UpperCamelCase__ )
elif groups[0] == "decoder":
_snake_case = re.sub(R'/mlp/' , R'/2/mlp/' , UpperCamelCase__ )
_snake_case = re.sub(R'/pre_mlp_layer_norm/' , R'/2/layer_norm/' , UpperCamelCase__ )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
_snake_case = new_key.replace(UpperCamelCase__ , UpperCamelCase__ )
print(F'''{key} -> {new_key}''' )
_snake_case = s_dict.pop(UpperCamelCase__ )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
_snake_case = s_dict[
'encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
_snake_case = s_dict[
'decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
_snake_case = s_dict[key].shape[0]
_snake_case = s_dict[key]
for idx in range(UpperCamelCase__ ):
                s_dict[key.replace('expert/' , f'experts/expert_{idx}/' )] = expert_weihts[idx]
                print(F'''{key} -> {key.replace("expert/" , f"experts/expert_{idx}/" )}''' )
s_dict.pop(UpperCamelCase__ )
return s_dict
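# Example of the renaming performed above (hedged; the input key is illustrative):
#   'encoder/layers_0/attention/key/kernel'
#     -> 'encoder/block/0/layer/0/SelfAttention/k/kernel'
# `layers_<n>` becomes `block/<n>/layer`, the attention sub-layer is prefixed with
# its index, and 'key' is shortened to 'k' via MOE_LAYER_NAME_MAPPING.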
UpperCAmelCase_ = {
"""NUM_ENCODER_LAYERS""": """num_layers""",
"""NUM_DECODER_LAYERS""": """num_decoder_layers""",
"""NUM_HEADS""": """num_heads""",
"""HEAD_DIM""": """d_kv""",
"""EMBED_DIM""": """d_model""",
"""MLP_DIM""": """d_ff""",
"""NUM_SELECTED_EXPERTS""": """num_selected_experts""",
"""NUM_ENCODER_SPARSE_LAYERS""": """num_sparse_encoder_layers""",
"""NUM_DECODER_SPARSE_LAYERS""": """num_sparse_decoder_layers""",
"""dense.MlpBlock.activations""": """feed_forward_proj""",
}
def lowerCamelCase__ ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[Any] ) -> List[Any]:
'''simple docstring'''
import regex as re
with open(UpperCamelCase__ , 'r' ) as f:
_snake_case = f.read()
_snake_case = re.findall(R'(.*) = ([0-9.]*)' , UpperCamelCase__ )
_snake_case = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
_snake_case = float(UpperCamelCase__ ) if '.' in value else int(UpperCamelCase__ )
_snake_case = re.findall(R'(.*activations) = \(\'(.*)\',\)' , UpperCamelCase__ )[0]
_snake_case = str(activation[1] )
_snake_case = num_experts
_snake_case = SwitchTransformersConfig(**UpperCamelCase__ )
return config
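# Hedged sketch of the gin -> config translation above: a gin file containing
#   NUM_HEADS = 12
#   MLP_DIM = 3072
#   dense.MlpBlock.activations = ('gelu',)
# would translate to num_heads=12, d_ff=3072 and feed_forward_proj='gelu' in the
# resulting SwitchTransformersConfig.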
def lowerCamelCase__ ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : List[str]="./" , UpperCamelCase__ : Dict=8 ) -> List[str]:
'''simple docstring'''
print(F'''Loading flax weights from : {flax_checkpoint_path}''' )
    _snake_case = checkpoints.load_t5x_checkpoint(UpperCamelCase__ )
if gin_file is not None:
_snake_case = convert_gin_to_config(UpperCamelCase__ , UpperCamelCase__ )
else:
_snake_case = SwitchTransformersConfig.from_pretrained(UpperCamelCase__ )
_snake_case = SwitchTransformersForConditionalGeneration(UpperCamelCase__ )
_snake_case = flax_params['target']
_snake_case = flatten_dict(UpperCamelCase__ , sep='/' )
_snake_case = rename_keys(UpperCamelCase__ )
_snake_case = unflatten_dict(UpperCamelCase__ , sep='/' )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(UpperCamelCase__ , UpperCamelCase__ )
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
pt_model.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--switch_t5x_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"""
""" model architecture. If not provided, a `gin_file` has to be provided."""
),
)
parser.add_argument(
"""--gin_file""",
default=None,
type=str,
required=False,
help="""Path to the gin config file. If not provided, a `config_file` has to be passed """,
)
parser.add_argument(
"""--config_name""", default=None, type=str, required=False, help="""Config name of SwitchTransformers model."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output pytorch model."""
)
parser.add_argument("""--num_experts""", default=8, type=int, required=False, help="""Number of experts""")
UpperCAmelCase_ = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 295
|
def lowerCamelCase__ ( ) -> int:
'''simple docstring'''
return 1
def lowerCamelCase__ ( UpperCamelCase__ : int ) -> int:
'''simple docstring'''
return 0 if x < 0 else two_pence(x - 2 ) + one_pence()
def lowerCamelCase__ ( UpperCamelCase__ : int ) -> int:
'''simple docstring'''
return 0 if x < 0 else five_pence(x - 5 ) + two_pence(UpperCamelCase__ )
def lowerCamelCase__ ( UpperCamelCase__ : int ) -> int:
'''simple docstring'''
return 0 if x < 0 else ten_pence(x - 10 ) + five_pence(UpperCamelCase__ )
def lowerCamelCase__ ( UpperCamelCase__ : int ) -> int:
'''simple docstring'''
return 0 if x < 0 else twenty_pence(x - 20 ) + ten_pence(UpperCamelCase__ )
def lowerCamelCase__ ( UpperCamelCase__ : int ) -> int:
'''simple docstring'''
return 0 if x < 0 else fifty_pence(x - 50 ) + twenty_pence(UpperCamelCase__ )
def lowerCamelCase__ ( UpperCamelCase__ : int ) -> int:
'''simple docstring'''
return 0 if x < 0 else one_pound(x - 100 ) + fifty_pence(UpperCamelCase__ )
def lowerCamelCase__ ( UpperCamelCase__ : int ) -> int:
'''simple docstring'''
return 0 if x < 0 else two_pound(x - 200 ) + one_pound(UpperCamelCase__ )
def lowerCamelCase__ ( UpperCamelCase__ : int = 200 ) -> int:
'''simple docstring'''
return two_pound(UpperCamelCase__ )
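# Sanity notes on the recursion above (hedged, not in the original): each level
# fixes how many coins of its own denomination are used and delegates the rest to
# the next smaller coin, so orderings are not double-counted. For x = 5 the
# partitions are 5, 2+2+1, 2+1+1+1 and 1+1+1+1+1, i.e. five_pence(5) == 4; for the
# default target of 200 pence the count is 73682 (Project Euler 31).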
if __name__ == "__main__":
print(solution(int(input().strip())))
| 295
| 1
|
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def lowerCamelCase__ ( UpperCamelCase__ : int=None , UpperCamelCase__ : List[Any]=None ) -> Optional[Any]:
'''simple docstring'''
return field(default_factory=lambda: default , metadata=UpperCamelCase__ )
@dataclass
class UpperCamelCase_ :
lowerCAmelCase_ = field(
metadata={'''help''': '''The csv file to plot.'''} , )
lowerCAmelCase_ = field(
default=_lowerCamelCase , metadata={'''help''': '''Whether to plot along batch size or sequence length. Defaults to sequence length.'''} , )
lowerCAmelCase_ = field(
default=_lowerCamelCase , metadata={'''help''': '''Whether the csv file has time results or memory results. Defaults to memory results.'''} , )
lowerCAmelCase_ = field(
default=_lowerCamelCase , metadata={'''help''': '''Disable logarithmic scale when plotting'''} , )
lowerCAmelCase_ = field(
default=_lowerCamelCase , metadata={
'''help''': '''Whether the csv file has training results or inference results. Defaults to inference results.'''
} , )
lowerCAmelCase_ = field(
default=_lowerCamelCase , metadata={'''help''': '''Filename under which the plot will be saved. If unused no plot is saved.'''} , )
lowerCAmelCase_ = list_field(
default=_lowerCamelCase , metadata={'''help''': '''List of model names that are used instead of the ones in the csv file.'''} )
def lowerCamelCase__ ( UpperCamelCase__ : str ) -> str:
'''simple docstring'''
try:
int(UpperCamelCase__ )
return True
except ValueError:
return False
def lowerCamelCase__ ( UpperCamelCase__ : Dict ) -> List[str]:
'''simple docstring'''
try:
float(UpperCamelCase__ )
return True
except ValueError:
return False
class UpperCamelCase_ :
def __init__( self , lowerCAmelCase_ ) -> Tuple:
_snake_case = args
_snake_case = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )
with open(self.args.csv_file , newline='' ) as csv_file:
_snake_case = csv.DictReader(lowerCAmelCase_ )
for row in reader:
_snake_case = row['model']
self.result_dict[model_name]["bsz"].append(int(row['batch_size'] ) )
self.result_dict[model_name]["seq_len"].append(int(row['sequence_length'] ) )
if can_convert_to_int(row['result'] ):
# value is not None
_snake_case = int(row['result'] )
elif can_convert_to_float(row['result'] ):
# value is not None
_snake_case = float(row['result'] )
def lowerCAmelCase ( self ) -> List[str]:
_snake_case , _snake_case = plt.subplots()
_snake_case = 'Time usage' if self.args.is_time else 'Memory usage'
_snake_case = title_str + ' for training' if self.args.is_train else title_str + ' for inference'
if not self.args.no_log_scale:
# set logarithm scales
ax.set_xscale('log' )
ax.set_yscale('log' )
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_formatter(ScalarFormatter() )
for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
_snake_case = sorted(set(self.result_dict[model_name]['bsz'] ) )
_snake_case = sorted(set(self.result_dict[model_name]['seq_len'] ) )
_snake_case = self.result_dict[model_name]['result']
((_snake_case) , (_snake_case)) = (
(batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
)
_snake_case = (
model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
)
for inner_loop_value in inner_loop_array:
if self.args.plot_along_batch:
_snake_case = np.asarray(
[results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=lowerCAmelCase_ , )
else:
_snake_case = np.asarray(
[results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , )
((_snake_case) , (_snake_case)) = (
('batch_size', 'len') if self.args.plot_along_batch else ('in #tokens', 'bsz')
)
_snake_case = np.asarray(lowerCAmelCase_ , lowerCAmelCase_ )[: len(lowerCAmelCase_ )]
plt.scatter(
lowerCAmelCase_ , lowerCAmelCase_ , label=F'''{label_model_name} - {inner_loop_label}: {inner_loop_value}''' )
plt.plot(lowerCAmelCase_ , lowerCAmelCase_ , '--' )
title_str += F''' {label_model_name} vs.'''
_snake_case = title_str[:-4]
_snake_case = 'Time in s' if self.args.is_time else 'Memory in MB'
# plot
plt.title(lowerCAmelCase_ )
plt.xlabel(lowerCAmelCase_ )
plt.ylabel(lowerCAmelCase_ )
plt.legend()
if self.args.figure_png_file is not None:
plt.savefig(self.args.figure_png_file )
else:
plt.show()
def lowerCamelCase__ ( ) -> Optional[Any]:
'''simple docstring'''
_snake_case = HfArgumentParser(UpperCamelCase__ )
_snake_case = parser.parse_args_into_dataclasses()[0]
_snake_case = Plot(args=UpperCamelCase__ )
plot.plot()
if __name__ == "__main__":
main()
| 295
|
def lowerCamelCase__ ( UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[int] ) -> Tuple:
'''simple docstring'''
_snake_case = [0 for i in range(r + 1 )]
# nc0 = 1
_snake_case = 1
for i in range(1 , n + 1 ):
# to compute current row from previous row.
_snake_case = min(UpperCamelCase__ , UpperCamelCase__ )
while j > 0:
c[j] += c[j - 1]
j -= 1
return c[r]
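# How the function above works (hedged note): it fills a single Pascal's-triangle
# row in place, sweeping j downwards so c[j - 1] still holds the previous row's
# value when c[j] += c[j - 1] runs; after processing row i, c[j] == C(i, j).
# Quick check: C(10, 5) == 252, which is what the call below prints.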
print(binomial_coefficient(n=10, r=5))
| 295
| 1
|
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
UpperCAmelCase_ = None
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
UpperCAmelCase_ = {
"""vocab_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/spiece.model""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/spiece.model""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/spiece.model""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/spiece.model""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/tokenizer.json""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/tokenizer.json""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/tokenizer.json""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/tokenizer.json""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/tokenizer.json""",
},
}
# TODO(PVP) - this should be removed in Transformers v5
UpperCAmelCase_ = {
"""t5-small""": 512,
"""t5-base""": 512,
"""t5-large""": 512,
"""t5-3b""": 512,
"""t5-11b""": 512,
}
class UpperCamelCase_ ( _lowerCamelCase ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ['''input_ids''', '''attention_mask''']
lowerCAmelCase_ = TaTokenizer
lowerCAmelCase_ = []
def __init__( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_="</s>" , lowerCAmelCase_="<unk>" , lowerCAmelCase_="<pad>" , lowerCAmelCase_=100 , lowerCAmelCase_=None , **lowerCAmelCase_ , ) -> Dict:
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
_snake_case = [F'''<extra_id_{i}>''' for i in range(lowerCAmelCase_ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
_snake_case = len(set(filter(lambda lowerCAmelCase_ : bool('extra_id_' in str(lowerCAmelCase_ ) ) , lowerCAmelCase_ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'
' tokens' )
super().__init__(
lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , extra_ids=lowerCAmelCase_ , additional_special_tokens=lowerCAmelCase_ , **lowerCAmelCase_ , )
_snake_case = vocab_file
_snake_case = False if not self.vocab_file else True
_snake_case = extra_ids
@staticmethod
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[str]:
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
_snake_case = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'This tokenizer was incorrectly instantiated with a model max length of'
F''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'
' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'
F''' {pretrained_model_name_or_path} automatically truncating your input to'''
F''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
F''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'
' instantiate this tokenizer with `model_max_length` set to your preferred value.' , lowerCAmelCase_ , )
return max_model_length
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(lowerCAmelCase_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
_snake_case = os.path.join(
lowerCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase_ ):
copyfile(self.vocab_file , lowerCAmelCase_ )
logger.info(F'''Copy vocab file to {out_vocab_file}''' )
return (out_vocab_file,)
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> List[int]:
_snake_case = token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return self.prefix_tokens + token_ids_a
else:
_snake_case = token_ids_a + [self.eos_token_id]
return self.prefix_tokens + token_ids_a + token_ids_a
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> List[int]:
_snake_case = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def lowerCAmelCase ( self ) -> Any:
return list(
set(filter(lambda lowerCAmelCase_ : bool(re.search(r'<extra_id_\d+>' , lowerCAmelCase_ ) ) is not None , self.additional_special_tokens ) ) )
def lowerCAmelCase ( self ) -> Dict:
return [self.convert_tokens_to_ids(lowerCAmelCase_ ) for token in self.get_sentinel_tokens()]
| 295
|
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class UpperCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
@register_to_config
def __init__( self , lowerCAmelCase_ = 128 , lowerCAmelCase_ = 256 , lowerCAmelCase_ = 20_00.0 , lowerCAmelCase_ = 768 , lowerCAmelCase_ = 12 , lowerCAmelCase_ = 12 , lowerCAmelCase_ = 64 , lowerCAmelCase_ = 2048 , lowerCAmelCase_ = 0.1 , ) -> Union[str, Any]:
super().__init__()
_snake_case = nn.Sequential(
nn.Linear(lowerCAmelCase_ , d_model * 4 , bias=lowerCAmelCase_ ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=lowerCAmelCase_ ) , nn.SiLU() , )
_snake_case = nn.Embedding(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = False
_snake_case = nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ )
_snake_case = nn.Dropout(p=lowerCAmelCase_ )
_snake_case = nn.ModuleList()
for lyr_num in range(lowerCAmelCase_ ):
# FiLM conditional T5 decoder
_snake_case = DecoderLayer(d_model=lowerCAmelCase_ , d_kv=lowerCAmelCase_ , num_heads=lowerCAmelCase_ , d_ff=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ )
self.decoders.append(lowerCAmelCase_ )
_snake_case = TaLayerNorm(lowerCAmelCase_ )
_snake_case = nn.Dropout(p=lowerCAmelCase_ )
_snake_case = nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Tuple:
_snake_case = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
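    # Shape note (a hedged aside): for a query mask of shape (batch, query_len) and
    # a key mask of shape (batch, key_len), the outer product above gives
    # (batch, query_len, key_len); unsqueeze(-3) then adds a broadcast head
    # dimension, yielding (batch, 1, query_len, key_len).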
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[Any]:
_snake_case , _snake_case , _snake_case = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
_snake_case = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
_snake_case = self.conditioning_emb(lowerCAmelCase_ ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
_snake_case = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
_snake_case = torch.broadcast_to(
torch.arange(lowerCAmelCase_ , device=decoder_input_tokens.device ) , (batch, seq_length) , )
_snake_case = self.position_encoding(lowerCAmelCase_ )
_snake_case = self.continuous_inputs_projection(lowerCAmelCase_ )
inputs += position_encodings
_snake_case = self.dropout(lowerCAmelCase_ )
# decoder: No padding present.
_snake_case = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
_snake_case = [(x, self.encoder_decoder_mask(lowerCAmelCase_ , lowerCAmelCase_ )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
_snake_case = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
_snake_case = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
_snake_case = lyr(
lowerCAmelCase_ , conditioning_emb=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , encoder_attention_mask=lowerCAmelCase_ , )[0]
_snake_case = self.decoder_norm(lowerCAmelCase_ )
_snake_case = self.post_dropout(lowerCAmelCase_ )
_snake_case = self.spec_out(lowerCAmelCase_ )
return spec_out
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=1E-6 ) -> Tuple:
super().__init__()
_snake_case = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=lowerCAmelCase_ , d_kv=lowerCAmelCase_ , num_heads=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=lowerCAmelCase_ , d_kv=lowerCAmelCase_ , num_heads=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ , layer_norm_epsilon=lowerCAmelCase_ , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=lowerCAmelCase_ , d_ff=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ , layer_norm_epsilon=lowerCAmelCase_ ) )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , ) -> Tuple:
_snake_case = self.layer[0](
lowerCAmelCase_ , conditioning_emb=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , )
if encoder_hidden_states is not None:
_snake_case = torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to(
encoder_hidden_states.dtype )
_snake_case = self.layer[1](
lowerCAmelCase_ , key_value_states=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , )
# Apply Film Conditional Feed Forward layer
_snake_case = self.layer[-1](lowerCAmelCase_ , lowerCAmelCase_ )
return (hidden_states,)
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Tuple:
super().__init__()
_snake_case = TaLayerNorm(lowerCAmelCase_ )
_snake_case = TaFiLMLayer(in_features=d_model * 4 , out_features=lowerCAmelCase_ )
_snake_case = Attention(query_dim=lowerCAmelCase_ , heads=lowerCAmelCase_ , dim_head=lowerCAmelCase_ , out_bias=lowerCAmelCase_ , scale_qk=lowerCAmelCase_ )
_snake_case = nn.Dropout(lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , ) -> str:
# pre_self_attention_layer_norm
_snake_case = self.layer_norm(lowerCAmelCase_ )
if conditioning_emb is not None:
_snake_case = self.FiLMLayer(lowerCAmelCase_ , lowerCAmelCase_ )
# Self-attention block
_snake_case = self.attention(lowerCAmelCase_ )
_snake_case = hidden_states + self.dropout(lowerCAmelCase_ )
return hidden_states
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> str:
super().__init__()
_snake_case = Attention(query_dim=lowerCAmelCase_ , heads=lowerCAmelCase_ , dim_head=lowerCAmelCase_ , out_bias=lowerCAmelCase_ , scale_qk=lowerCAmelCase_ )
_snake_case = TaLayerNorm(lowerCAmelCase_ , eps=lowerCAmelCase_ )
_snake_case = nn.Dropout(lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , ) -> Dict:
_snake_case = self.layer_norm(lowerCAmelCase_ )
_snake_case = self.attention(
lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , attention_mask=attention_mask.squeeze(1 ) , )
_snake_case = hidden_states + self.dropout(lowerCAmelCase_ )
return layer_output
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[str]:
super().__init__()
_snake_case = TaDenseGatedActDense(d_model=lowerCAmelCase_ , d_ff=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ )
_snake_case = TaFiLMLayer(in_features=d_model * 4 , out_features=lowerCAmelCase_ )
_snake_case = TaLayerNorm(lowerCAmelCase_ , eps=lowerCAmelCase_ )
_snake_case = nn.Dropout(lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=None ) -> Union[str, Any]:
_snake_case = self.layer_norm(lowerCAmelCase_ )
if conditioning_emb is not None:
_snake_case = self.film(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = self.DenseReluDense(lowerCAmelCase_ )
_snake_case = hidden_states + self.dropout(lowerCAmelCase_ )
return hidden_states
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Union[str, Any]:
super().__init__()
_snake_case = nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ )
_snake_case = nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ )
_snake_case = nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ )
_snake_case = nn.Dropout(lowerCAmelCase_ )
_snake_case = NewGELUActivation()
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Any:
_snake_case = self.act(self.wi_a(lowerCAmelCase_ ) )
_snake_case = self.wi_a(lowerCAmelCase_ )
_snake_case = hidden_gelu * hidden_linear
_snake_case = self.dropout(lowerCAmelCase_ )
_snake_case = self.wo(lowerCAmelCase_ )
return hidden_states
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=1E-6 ) -> str:
super().__init__()
_snake_case = nn.Parameter(torch.ones(lowerCAmelCase_ ) )
_snake_case = eps
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> int:
# T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
# Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
# w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
# half-precision inputs is done in fp32
_snake_case = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=lowerCAmelCase_ )
_snake_case = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
_snake_case = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
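# The class above is an RMSNorm (hedged summary): y = weight * x / sqrt(mean(x**2) + eps),
# i.e. scale-only normalisation with no mean subtraction and no bias, with the
# second moment accumulated in fp32 before casting back for half-precision inputs.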
class UpperCamelCase_ ( nn.Module ):
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> torch.Tensor:
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.04_47_15 * torch.pow(lowerCAmelCase_ , 3.0 )) ))
class UpperCamelCase_ ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Any:
super().__init__()
_snake_case = nn.Linear(lowerCAmelCase_ , out_features * 2 , bias=lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[Any]:
_snake_case = self.scale_bias(lowerCAmelCase_ )
_snake_case , _snake_case = torch.chunk(lowerCAmelCase_ , 2 , -1 )
_snake_case = x * (1 + scale) + shift
return x
| 295
| 1
|
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def lowerCamelCase__ ( UpperCamelCase__ : bool = True , *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : List[Any] ) -> Tuple:
'''simple docstring'''
if not is_tqdm_available():
raise ImportError('Accelerate\'s `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.' )
_snake_case = False
if main_process_only:
        _snake_case = PartialState().local_process_index != 0
return _tqdm(*UpperCamelCase__ , **UpperCamelCase__ , disable=UpperCamelCase__ )
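# Hedged usage sketch: the wrapper forwards everything to tqdm.auto.tqdm but, by
# default, only renders a bar on the local main process, e.g.
#   for batch in tqdm(True, dataloader, desc='train'):
#       ...
# Passing main_process_only=False shows a bar on every process.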
| 295
|
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"""EleutherAI/gpt-neo-1.3B""": """https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json""",
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class UpperCamelCase_ ( _lowerCamelCase ):
lowerCAmelCase_ = '''gpt_neo'''
lowerCAmelCase_ = ['''past_key_values''']
lowerCAmelCase_ = {'''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
def __init__( self , lowerCAmelCase_=5_0257 , lowerCAmelCase_=2048 , lowerCAmelCase_=2048 , lowerCAmelCase_=24 , lowerCAmelCase_=[[["global", "local"], 12]] , lowerCAmelCase_=16 , lowerCAmelCase_=None , lowerCAmelCase_=256 , lowerCAmelCase_="gelu_new" , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.1 , lowerCAmelCase_=1E-5 , lowerCAmelCase_=0.02 , lowerCAmelCase_=True , lowerCAmelCase_=5_0256 , lowerCAmelCase_=5_0256 , **lowerCAmelCase_ , ) -> Tuple:
_snake_case = vocab_size
_snake_case = max_position_embeddings
_snake_case = hidden_size
_snake_case = num_layers
_snake_case = num_heads
_snake_case = intermediate_size
_snake_case = window_size
_snake_case = activation_function
_snake_case = resid_dropout
_snake_case = embed_dropout
_snake_case = attention_dropout
_snake_case = classifier_dropout
_snake_case = layer_norm_epsilon
_snake_case = initializer_range
_snake_case = use_cache
_snake_case = bos_token_id
_snake_case = eos_token_id
_snake_case = attention_types
_snake_case = self.expand_attention_types_params(lowerCAmelCase_ )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.attention_layers)` == `config.num_layers` '
F'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, '''
F'''`config.num_layers = {self.num_layers}`. '''
'`config.attention_layers` is prepared using `config.attention_types`. '
'Please verify the value of `config.attention_types` argument.' )
super().__init__(bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , **lowerCAmelCase_ )
@staticmethod
def lowerCAmelCase ( lowerCAmelCase_ ) -> Any:
_snake_case = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
def lowerCamelCase__ ( UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[int] ) -> Any:
'''simple docstring'''
import torch
_snake_case = input.size()
_snake_case = len(UpperCamelCase__ )
_snake_case = shape[dimension]
_snake_case = torch.arange(0 , UpperCamelCase__ , UpperCamelCase__ )
_snake_case = torch.div(sizedim - size , UpperCamelCase__ , rounding_mode='floor' ) + 1
_snake_case = torch.arange(UpperCamelCase__ ) + low_indices[:min_length][:, None]
_snake_case = [slice(UpperCamelCase__ )] * rank
_snake_case = indices
_snake_case = input[s]
_snake_case = list(range(0 , rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(UpperCamelCase__ )
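# What the helper above computes (hedged): a sliding-window view along `dimension`,
# similar in spirit to torch.Tensor.unfold. E.g. for an input of shape (batch, 10)
# with dimension=1, size=4, step=2 there are (10 - 4) // 2 + 1 = 4 windows, starting
# at offsets 0, 2, 4 and 6, so the result has shape (batch, 4, 4).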
def lowerCamelCase__ ( UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict ) -> str:
'''simple docstring'''
import torch
_snake_case = torch.arange(1 , UpperCamelCase__ )
_snake_case = torch.remainder(UpperCamelCase__ , UpperCamelCase__ )
_snake_case = remainders == 0
_snake_case = candidates[divisor_indices]
_snake_case = torch.max(UpperCamelCase__ )
return largest_divisor, torch.div(UpperCamelCase__ , UpperCamelCase__ , rounding_mode='floor' )
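# Worked example for the helper above (hedged): for an input of 9 the candidates
# are 1..8, the divisors among them are {1, 3}, so it returns (3, 3) -- the
# largest proper divisor and the matching quotient.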
class UpperCamelCase_ ( _lowerCamelCase ):
@property
def lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
_snake_case = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase_ , direction='inputs' )
_snake_case = {0: 'batch', 1: 'past_sequence + sequence'}
else:
_snake_case = {0: 'batch', 1: 'sequence'}
return common_inputs
@property
def lowerCAmelCase ( self ) -> int:
return self._config.num_heads
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = -1 , lowerCAmelCase_ = -1 , lowerCAmelCase_ = False , lowerCAmelCase_ = None , ) -> Mapping[str, Any]:
_snake_case = super(lowerCAmelCase_ , self ).generate_dummy_inputs(
lowerCAmelCase_ , batch_size=lowerCAmelCase_ , seq_length=lowerCAmelCase_ , is_pair=lowerCAmelCase_ , framework=lowerCAmelCase_ )
# We need to order the input in the way they appears in the forward()
_snake_case = OrderedDict({'input_ids': common_inputs['input_ids']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
_snake_case , _snake_case = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
_snake_case = seqlen + 2
_snake_case = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
_snake_case = [
(torch.zeros(lowerCAmelCase_ ), torch.zeros(lowerCAmelCase_ )) for _ in range(self.num_layers )
]
_snake_case = common_inputs['attention_mask']
if self.use_past:
_snake_case = ordered_inputs['attention_mask'].dtype
_snake_case = torch.cat(
[ordered_inputs['attention_mask'], torch.ones(lowerCAmelCase_ , lowerCAmelCase_ , dtype=lowerCAmelCase_ )] , dim=1 )
return ordered_inputs
@property
def lowerCAmelCase ( self ) -> int:
return 13
| 295
| 1
|
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
UpperCAmelCase_ = {"""configuration_dpt""": ["""DPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DPTConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ["""DPTFeatureExtractor"""]
UpperCAmelCase_ = ["""DPTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"""DPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DPTForDepthEstimation""",
"""DPTForSemanticSegmentation""",
"""DPTModel""",
"""DPTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 295
|
from cva import destroyAllWindows, imread, imshow, waitKey
def lowerCamelCase__ ( UpperCamelCase__ : Dict ) -> Optional[Any]:
'''simple docstring'''
_snake_case , _snake_case = img.shape[0], img.shape[1]
# converting each pixel's color to its negative
for i in range(UpperCamelCase__ ):
for j in range(UpperCamelCase__ ):
_snake_case = [255, 255, 255] - img[i][j]
return img
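# Note (hedged): since `img` is a NumPy uint8 array from cv2.imread, the double
# loop above is equivalent to a single vectorised expression:
#   img = 255 - img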
if __name__ == "__main__":
# read original image
UpperCAmelCase_ = imread("""image_data/lena.jpg""", 1)
# convert to its negative
UpperCAmelCase_ = convert_to_negative(img)
# show result image
imshow("""negative of original image""", img)
waitKey(0)
destroyAllWindows()
| 295
| 1
|
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
UpperCAmelCase_ = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""")
UpperCAmelCase_ = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
UpperCAmelCase_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class UpperCamelCase_ :
lowerCAmelCase_ = field(
default='''cifar10''' , metadata={'''help''': '''Name of a dataset from the datasets package'''} )
lowerCAmelCase_ = field(
default=_lowerCamelCase , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
lowerCAmelCase_ = field(
default=_lowerCamelCase , metadata={'''help''': '''The column name of the images in the files. If not set, will try to use \'image\' or \'img\'.'''} , )
lowerCAmelCase_ = field(default=_lowerCamelCase , metadata={'''help''': '''A folder containing the training data.'''} )
lowerCAmelCase_ = field(default=_lowerCamelCase , metadata={'''help''': '''A folder containing the validation data.'''} )
lowerCAmelCase_ = field(
default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} )
lowerCAmelCase_ = field(default=32 , metadata={'''help''': '''The size of the square patches to use for masking.'''} )
lowerCAmelCase_ = field(
default=0.6 , metadata={'''help''': '''Percentage of patches to mask.'''} , )
lowerCAmelCase_ = field(
default=_lowerCamelCase , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
lowerCAmelCase_ = field(
default=_lowerCamelCase , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
def lowerCAmelCase ( self ) -> Any:
_snake_case = {}
if self.train_dir is not None:
_snake_case = self.train_dir
if self.validation_dir is not None:
_snake_case = self.validation_dir
_snake_case = data_files if data_files else None
@dataclass
class UpperCamelCase_ :
lowerCAmelCase_ = field(
default=_lowerCamelCase , metadata={
'''help''': (
'''The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a '''
'''checkpoint identifier on the hub. '''
'''Don\'t set if you want to train a model from scratch.'''
)
} , )
lowerCAmelCase_ = field(
default=_lowerCamelCase , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(_lowerCamelCase )} , )
lowerCAmelCase_ = field(
default=_lowerCamelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
lowerCAmelCase_ = field(
default=_lowerCamelCase , metadata={
'''help''': (
'''Override some existing default config settings when a model is trained from scratch. Example: '''
'''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'''
)
} , )
lowerCAmelCase_ = field(
default=_lowerCamelCase , metadata={'''help''': '''Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'''} , )
lowerCAmelCase_ = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
lowerCAmelCase_ = field(default=_lowerCamelCase , metadata={'''help''': '''Name or path of preprocessor config.'''} )
lowerCAmelCase_ = field(
default=_lowerCamelCase , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
lowerCAmelCase_ = field(
default=_lowerCamelCase , metadata={
'''help''': (
'''The size (resolution) of each image. If not specified, will use `image_size` of the configuration.'''
)
} , )
lowerCAmelCase_ = field(
default=_lowerCamelCase , metadata={
'''help''': (
'''The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration.'''
)
} , )
lowerCAmelCase_ = field(
default=_lowerCamelCase , metadata={'''help''': '''Stride to use for the encoder.'''} , )
class UpperCamelCase_ :
def __init__( self , lowerCAmelCase_=192 , lowerCAmelCase_=32 , lowerCAmelCase_=4 , lowerCAmelCase_=0.6 ) -> Tuple:
_snake_case = input_size
_snake_case = mask_patch_size
_snake_case = model_patch_size
_snake_case = mask_ratio
if self.input_size % self.mask_patch_size != 0:
raise ValueError('Input size must be divisible by mask patch size' )
if self.mask_patch_size % self.model_patch_size != 0:
raise ValueError('Mask patch size must be divisible by model patch size' )
_snake_case = self.input_size // self.mask_patch_size
_snake_case = self.mask_patch_size // self.model_patch_size
_snake_case = self.rand_size**2
_snake_case = int(np.ceil(self.token_count * self.mask_ratio ) )
def __call__( self ) -> Any:
_snake_case = np.random.permutation(self.token_count )[: self.mask_count]
_snake_case = np.zeros(self.token_count , dtype=lowerCAmelCase_ )
_snake_case = 1
_snake_case = mask.reshape((self.rand_size, self.rand_size) )
_snake_case = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 )
return torch.tensor(mask.flatten() )
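# Worked numbers for the defaults above (hedged): input_size=192 and
# mask_patch_size=32 give a 6x6 grid of maskable patches (token_count = 36) and
# mask_count = ceil(36 * 0.6) = 22; with model_patch_size=4 the mask is repeated by
# scale = 8 along each axis onto the model's 48x48 patch grid before flattening.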
def lowerCamelCase__ ( UpperCamelCase__ : Union[str, Any] ) -> Tuple:
'''simple docstring'''
_snake_case = torch.stack([example['pixel_values'] for example in examples] )
_snake_case = torch.stack([example['mask'] for example in examples] )
return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def lowerCamelCase__ ( ) -> Union[str, Any]:
'''simple docstring'''
_snake_case = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_snake_case , _snake_case , _snake_case = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_snake_case , _snake_case , _snake_case = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_mim' , UpperCamelCase__ , UpperCamelCase__ )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_snake_case = training_args.get_process_log_level()
logger.setLevel(UpperCamelCase__ )
transformers.utils.logging.set_verbosity(UpperCamelCase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
_snake_case = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_snake_case = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Initialize our dataset.
_snake_case = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
_snake_case = None if 'validation' in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , UpperCamelCase__ ) and data_args.train_val_split > 0.0:
_snake_case = ds['train'].train_test_split(data_args.train_val_split )
_snake_case = split['train']
_snake_case = split['test']
# Create config
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_snake_case = {
'cache_dir': model_args.cache_dir,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.config_name_or_path:
_snake_case = AutoConfig.from_pretrained(model_args.config_name_or_path , **UpperCamelCase__ )
elif model_args.model_name_or_path:
_snake_case = AutoConfig.from_pretrained(model_args.model_name_or_path , **UpperCamelCase__ )
else:
_snake_case = CONFIG_MAPPING[model_args.model_type]()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.config_overrides is not None:
logger.info(F'''Overriding config: {model_args.config_overrides}''' )
config.update_from_string(model_args.config_overrides )
logger.info(F'''New config: {config}''' )
# make sure the decoder_type is "simmim" (only relevant for BEiT)
if hasattr(UpperCamelCase__ , 'decoder_type' ):
_snake_case = 'simmim'
# adapt config
_snake_case = model_args.image_size if model_args.image_size is not None else config.image_size
_snake_case = model_args.patch_size if model_args.patch_size is not None else config.patch_size
_snake_case = (
model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
)
config.update(
{
'image_size': model_args.image_size,
'patch_size': model_args.patch_size,
'encoder_stride': model_args.encoder_stride,
} )
# create image processor
if model_args.image_processor_name:
_snake_case = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **UpperCamelCase__ )
elif model_args.model_name_or_path:
_snake_case = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **UpperCamelCase__ )
else:
_snake_case = {
conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
}
_snake_case = IMAGE_PROCESSOR_TYPES[model_args.model_type]()
# create model
if model_args.model_name_or_path:
_snake_case = AutoModelForMaskedImageModeling.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=UpperCamelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('Training new model from scratch' )
_snake_case = AutoModelForMaskedImageModeling.from_config(UpperCamelCase__ )
if training_args.do_train:
_snake_case = ds['train'].column_names
else:
_snake_case = ds['validation'].column_names
if data_args.image_column_name is not None:
_snake_case = data_args.image_column_name
elif "image" in column_names:
_snake_case = 'image'
elif "img" in column_names:
_snake_case = 'img'
else:
_snake_case = column_names[0]
# transformations as done in original SimMIM paper
# source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
_snake_case = Compose(
[
            Lambda(lambda img: img.convert('RGB' ) if img.mode != "RGB" else img ),
RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
# create mask generator
_snake_case = MaskGenerator(
input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , )
def preprocess_images(UpperCamelCase__ : Dict ):
_snake_case = [transforms(UpperCamelCase__ ) for image in examples[image_column_name]]
_snake_case = [mask_generator() for i in range(len(examples[image_column_name] ) )]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('--do_train requires a train dataset' )
if data_args.max_train_samples is not None:
_snake_case = ds['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(UpperCamelCase__ )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('--do_eval requires a validation dataset' )
if data_args.max_eval_samples is not None:
_snake_case = (
ds['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(UpperCamelCase__ )
# Initialize our trainer
_snake_case = Trainer(
model=UpperCamelCase__ , args=UpperCamelCase__ , train_dataset=ds['train'] if training_args.do_train else None , eval_dataset=ds['validation'] if training_args.do_eval else None , tokenizer=UpperCamelCase__ , data_collator=UpperCamelCase__ , )
# Training
if training_args.do_train:
_snake_case = None
if training_args.resume_from_checkpoint is not None:
_snake_case = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_snake_case = last_checkpoint
_snake_case = trainer.train(resume_from_checkpoint=UpperCamelCase__ )
trainer.save_model()
trainer.log_metrics('train' , train_result.metrics )
trainer.save_metrics('train' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
_snake_case = trainer.evaluate()
trainer.log_metrics('eval' , UpperCamelCase__ )
trainer.save_metrics('eval' , UpperCamelCase__ )
# Write model card and (optionally) push to hub
_snake_case = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'masked-image-modeling',
'dataset': data_args.dataset_name,
'tags': ['masked-image-modeling'],
}
if training_args.push_to_hub:
trainer.push_to_hub(**UpperCamelCase__ )
else:
trainer.create_model_card(**UpperCamelCase__ )
if __name__ == "__main__":
main()
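
# For reference, a minimal sketch of the patch-mask generator this script uses.
# The real `MaskGenerator` is defined earlier in the full script, outside this
# excerpt, so the implementation below is an assumption kept for illustration
# and deliberately given a different name so it cannot shadow the real class.
import numpy as np
import torch


class MaskGeneratorSketch:
    """Produces a flat 0/1 token mask; 1 marks a model patch that must be reconstructed."""

    def __init__(self, input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6):
        if input_size % mask_patch_size != 0 or mask_patch_size % model_patch_size != 0:
            raise ValueError("input_size / mask_patch_size / model_patch_size must nest evenly")
        self.rand_size = input_size // mask_patch_size  # resolution of the coarse masking grid
        self.scale = mask_patch_size // model_patch_size  # upsampling factor to model patches
        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * mask_ratio))

    def __call__(self):
        # pick `mask_count` coarse cells at random, then upsample to model-patch resolution
        mask_idx = np.random.permutation(self.token_count)[: self.mask_count]
        mask = np.zeros(self.token_count, dtype=int)
        mask[mask_idx] = 1
        mask = mask.reshape((self.rand_size, self.rand_size))
        mask = mask.repeat(self.scale, axis=0).repeat(self.scale, axis=1)
        return torch.tensor(mask.flatten())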
| 295
|
import argparse
import json

import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download

from transformers import (
    VideoMAEConfig,
    VideoMAEForPreTraining,
    VideoMAEForVideoClassification,
    VideoMAEImageProcessor,
)


def get_videomae_config(model_name):
    config = VideoMAEConfig()

    set_architecture_configs(model_name, config)

    if "finetuned" not in model_name:
        config.use_mean_pooling = False

    if "finetuned" in model_name:
        repo_id = "huggingface/label-files"
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = "kinetics400-id2label.json"
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = "something-something-v2-id2label.json"
        else:
            raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.")
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config


def set_architecture_configs(model_name, config):
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError('Model name should include either "small", "base", "large", or "huge"')


def rename_key(name):
    if "encoder." in name:
        name = name.replace("encoder.", "")
    if "cls_token" in name:
        name = name.replace("cls_token", "videomae.embeddings.cls_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "videomae.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "videomae.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "videomae.embeddings.norm")
    if "decoder.blocks" in name:
        name = name.replace("decoder.blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "videomae.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name and "bias" not in name:
        name = name.replace("attn", "attention.self")
    if "attn" in name:
        name = name.replace("attn", "attention.attention")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.weight", "videomae.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.bias", "videomae.layernorm.bias")
    if "head" in name and "decoder" not in name:
        name = name.replace("head", "classifier")

    return name


def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if key.startswith("encoder."):
            key = key.replace("encoder.", "")

        if "qkv" in key:
            # split the fused qkv projection into separate query/key/value weights
            key_split = key.split(".")
            if key.startswith("decoder.blocks"):
                dim = config.decoder_hidden_size
                layer_num = int(key_split[2])
                prefix = "decoder.decoder_layers."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                dim = config.hidden_size
                layer_num = int(key_split[1])
                prefix = "videomae.encoder.layer."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)


def convert_videomae_checkpoint(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub):
    config = get_videomae_config(model_name)

    if "finetuned" in model_name:
        model = VideoMAEForVideoClassification(config)
    else:
        model = VideoMAEForPreTraining(config)

    # download original checkpoint, hosted on Google Drive
    output = "pytorch_model.bin"
    gdown.cached_download(checkpoint_url, output, quiet=False)
    files = torch.load(output, map_location="cpu")
    if "model" in files:
        state_dict = files["model"]
    else:
        state_dict = files["module"]
    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    # verify model on basic input
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
    video = prepare_video()
    inputs = image_processor(video, return_tensors="pt")

    if "finetuned" not in model_name:
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

    outputs = model(**inputs)
    logits = outputs.logits

    model_names = [
        "videomae-small-finetuned-kinetics",
        "videomae-small-finetuned-ssv2",
        # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
        "videomae-base-short",
        "videomae-base-short-finetuned-kinetics",
        "videomae-base",
        "videomae-base-finetuned-kinetics",
        "videomae-large",
        "videomae-large-finetuned-kinetics",
        "videomae-huge-finetuned-kinetics",
        # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
        "videomae-base-short-ssv2",
        "videomae-base-short-finetuned-ssv2",
        "videomae-base-ssv2",
        "videomae-base-finetuned-ssv2",
    ]

    # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
    if model_name == "videomae-small-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([-0.9291, -0.4061, -0.9307])
    elif model_name == "videomae-small-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.2671, -0.4689, -0.8235])
    elif model_name == "videomae-base":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]])
    elif model_name == "videomae-base-short":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]])
        # we verified the loss both for normalized and unnormalized targets for this one
        expected_loss = torch.tensor([0.5142]) if config.norm_pix_loss else torch.tensor([0.6469])
    elif model_name == "videomae-large":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]])
    elif model_name == "videomae-large-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.0771, 0.0011, -0.3625])
    elif model_name == "videomae-huge-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.2433, 0.1632, -0.4894])
    elif model_name == "videomae-base-short-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.6588, 0.0990, -0.2493])
    elif model_name == "videomae-base-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421])
    elif model_name == "videomae-base-short-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]])
    elif model_name == "videomae-base-short-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([-0.0537, -0.1539, -0.3266])
    elif model_name == "videomae-base-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]])
    elif model_name == "videomae-base-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.1961, -0.8337, -0.6389])
    else:
        raise ValueError(f"Model name not supported. Should be one of {model_names}")

    # verify logits
    assert logits.shape == expected_shape
    if "finetuned" in model_name:
        assert torch.allclose(logits[0, :3], expected_slice, atol=1e-4)
    else:
        print("Logits:", logits[0, :3, :3])
        assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)
    print("Logits ok!")

    # verify loss, if applicable
    if model_name == "videomae-base-short":
        loss = outputs.loss
        assert torch.allclose(loss, expected_loss, atol=1e-4)
        print("Loss ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        model.push_to_hub(model_name, organization="nielsr")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default=(
            "https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4"
        ),
        type=str,
        help=(
            "URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"
            " download link."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/Users/nielsrogge/Documents/VideoMAE/Test",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--model_name", default="videomae-base", type=str, help="Name of the model.")
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
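
# Example invocation, kept as comments (the script filename and output path
# here are hypothetical; the checkpoint URL must be a direct Google Drive
# download link like the default above):
#
#   python convert_videomae_to_pytorch.py \
#       --checkpoint_url "<google-drive-download-url>" \
#       --pytorch_dump_folder_path ./videomae-base \
#       --model_name videomae-base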
| 295
| 1
|
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
UpperCAmelCase_ = """\
@inproceedings{snover-etal-2006-study,
title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",
author = \"Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John\",
booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",
month = aug # \" 8-12\",
year = \"2006\",
address = \"Cambridge, Massachusetts, USA\",
publisher = \"Association for Machine Translation in the Americas\",
url = \"https://aclanthology.org/2006.amta-papers.25\",
pages = \"223--231\",
}
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""
UpperCAmelCase_ = """\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The lengths of
the reference and hypothesis lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
"""
UpperCAmelCase_ = """
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
    ignore_punct (boolean): If `True`, removes punctuation before scoring. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
'score' (float): TER score (num_edits / sum_ref_lengths * 100)
'num_edits' (int): The cumulative number of edits
'ref_length' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\",
... \"What did the TER metric user say to the developer?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],
... [\"Your jokes are...\", \"...TERrible\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}
Example 2:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}
Example 3:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}
Example 4:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}
Example 5:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\",
... \"What did the TER metric user say to the developer?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],
... [\"Your jokes are...\", \"...TERrible\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Ter(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of"
                " `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="http://www.cs.umd.edu/~snover/tercom/",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"],
            reference_urls=[
                "https://github.com/jhclark/tercom",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")

        # sacrebleu expects one list per reference "stream", so transpose the per-prediction lists
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)

        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 295
|
from typing import Optional

import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import (
    BackboneOutput,
    BaseModelOutputWithNoAttention,
    BaseModelOutputWithPoolingAndNoAttention,
    ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/resnet-50",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]


class ResNetConvLayer(nn.Module):
    def __init__(
        self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu"
    ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetEmbeddings(nn.Module):
    """ResNet embeddings (stem): a single aggressive 7x7 convolution followed by max pooling."""

    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act
        )
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.num_channels = config.num_channels

    def forward(self, pixel_values: Tensor) -> Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding


class ResNetShortCut(nn.Module):
    """1x1 convolution used to project the residual to the correct size and (optionally) downsample it."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state


class ResNetBasicLayer(nn.Module):
    """A classic ResNet residual layer composed of two 3x3 convolutions."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu"):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride),
            ResNetConvLayer(out_channels, out_channels, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetBottleNeckLayer(nn.Module):
    """
    Bottleneck residual layer: a 1x1 convolution reduces the channels by `reduction`, a 3x3 convolution processes
    them, and a final 1x1 convolution remaps the reduced features to `out_channels`.
    """

    def __init__(
        self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4
    ):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1),
            ResNetConvLayer(reduces_channels, reduces_channels, stride=stride),
            ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetStage(nn.Module):
    """A ResNet stage composed of stacked layers."""

    def __init__(
        self,
        config: ResNetConfig,
        in_channels: int,
        out_channels: int,
        stride: int = 2,
        depth: int = 2,
    ):
        super().__init__()

        layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer

        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state


class ResNetEncoder(nn.Module):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)

            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state,
            hidden_states=hidden_states,
        )


class ResNetPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value


RESNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

RESNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.

        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare ResNet model outputting raw features without any specific head on top.",
    RESNET_START_DOCSTRING,
)
class ResNetModel(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]

        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )


@add_start_docstrings(
    """
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetForImageClassification(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)


@add_start_docstrings(
    """
    ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)

        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)

        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BackboneOutput:
        """
        Returns:
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        embedding_output = self.embedder(pixel_values)

        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)

        hidden_states = outputs.hidden_states

        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output

        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
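
# Illustrative usage of the bare model, kept as comments so the module stays
# import-clean; mirrors the documented example for microsoft/resnet-50:
#
#   from transformers import AutoImageProcessor, ResNetModel
#   import torch
#
#   image_processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
#   model = ResNetModel.from_pretrained("microsoft/resnet-50")
#   inputs = image_processor(image, return_tensors="pt")
#   with torch.no_grad():
#       outputs = model(**inputs)
#   print(outputs.last_hidden_state.shape)  # torch.Size([1, 2048, 7, 7])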
| 295
| 1
|
from __future__ import annotations


class BoyerMooreSearch:
    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        # finds the rightmost index of char in the pattern, or -1 if absent
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        # compares pattern and text right-to-left starting at current_pos and
        # returns the text index of the first mismatch, or -1 on a full match
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions


text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()

if len(positions) == 0:
    print("No match found")
else:
    print("Pattern found in following positions: ")
    print(positions)
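
# Two extra sanity checks (not part of the original demo): a pattern with two
# occurrences, and a pattern that never occurs in the text.
assert BoyerMooreSearch("ABAABA", "AB").bad_character_heuristic() == [0, 3]
assert BoyerMooreSearch("ABAABA", "CC").bad_character_heuristic() == []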
| 295
|
def valid_connection(graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]) -> bool:
    # 1. Validate that an edge exists between the current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False

    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    # Base case: all vertices placed, check the closing edge back to the start
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1

    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    # initialize path with -1, indicating vertices that have not been visited yet
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate; if we find an answer return the path, otherwise return an empty list
    return path if util_hamilton_cycle(graph, path, 1) else []
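
# Illustrative usage (not in the original module): this 5-vertex graph contains
# the Hamiltonian cycle 0 -> 1 -> 2 -> 4 -> 3 -> 0, which the search finds.
if __name__ == "__main__":
    example_graph = [
        [0, 1, 0, 1, 0],
        [1, 0, 1, 1, 1],
        [0, 1, 0, 0, 1],
        [1, 1, 0, 0, 1],
        [0, 1, 1, 1, 0],
    ]
    print(hamilton_cycle(example_graph))  # [0, 1, 2, 4, 3, 0]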
| 295
| 1
|
def one_pence() -> int:
    return 1


def two_pence(x: int) -> int:
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x: int) -> int:
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(n: int = 200) -> int:
    # Count the ways to make `n` pence from 1p, 2p, 5p, 10p, 20p, 50p, £1 and £2 coins.
    return two_pound(n)
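
# Sanity check (the well-known answer to Project Euler problem 31): there are
# 73682 ways to make £2, i.e. solution(200) == 73682.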
if __name__ == "__main__":
print(solution(int(input().strip())))
| 295
|
import argparse

import torch
from omegaconf import OmegaConf

from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel


def convert_ldm_original(checkpoint_path, config_path, output_path):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, scheduler)
    pipeline.save_pretrained(output_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", type=str, required=True)
    parser.add_argument("--config_path", type=str, required=True)
    parser.add_argument("--output_path", type=str, required=True)
    args = parser.parse_args()

    convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
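
# Example invocation, kept as comments (script filename and paths are hypothetical):
#
#   python convert_ldm_original.py \
#       --checkpoint_path ./ldm/model.ckpt \
#       --config_path ./ldm/config.yaml \
#       --output_path ./ldm-pipeline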
| 295
| 1
|
def bfs(graph, s, t, parent):
    # Standard BFS that records the parent of each reached node; returns True
    # when the sink `t` is reachable from the source `s` in the residual graph.
    visited = [False] * len(graph)
    queue = []
    queue.append(s)
    visited[s] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[t]


def ford_fulkerson(graph, source, sink):
    # Edmonds-Karp variant: repeatedly find an augmenting path with BFS and saturate it
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the selected path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow

        # update residual capacities of the edges and reverse edges along the path
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    return max_flow


graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]

source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))  # 23 for this classic CLRS example network
| 295
|
import unittest
from typing import Tuple

import torch

from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch


@require_torch
class UNetBlockTesterMixin:
    @property
    def dummy_input(self):
        return self.get_dummy_input()

    @property
    def output_shape(self):
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)

        raise ValueError(f"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.")

    def get_dummy_input(
        self,
        include_temb=True,
        include_res_hidden_states_tuple=False,
        include_encoder_hidden_states=False,
        include_skip_sample=False,
    ):
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)

        generator = torch.manual_seed(0)
        device = torch.device(torch_device)
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape, generator=generator, device=device)
        dummy_input = {"hidden_states": hidden_states}

        if include_temb:
            temb_channels = 128
            dummy_input["temb"] = randn_tensor((batch_size, temb_channels), generator=generator, device=device)

        if include_res_hidden_states_tuple:
            generator_1 = torch.manual_seed(1)
            dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape, generator=generator_1, device=device),)

        if include_encoder_hidden_states:
            dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32)).to(torch_device)

        if include_skip_sample:
            dummy_input["skip_sample"] = randn_tensor(((batch_size, 3) + sizes), generator=generator, device=device)

        return dummy_input

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
            "temb_channels": 128,
        }
        if self.block_type == "up":
            init_dict["prev_output_channel"] = 32

        if self.block_type == "mid":
            init_dict.pop("out_channels")

        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self, expected_slice):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict)
        unet_block.to(torch_device)
        unet_block.eval()

        with torch.no_grad():
            output = unet_block(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        self.assertEqual(output.shape, self.output_shape)

        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        assert torch_all_close(output_slice.flatten(), expected_slice, atol=5e-3)

    @unittest.skipIf(torch_device == "mps", "Training is not supported in mps")
    def test_training(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict)
        model.to(torch_device)
        model.train()
        output = model(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        device = torch.device(torch_device)
        noise = randn_tensor(output.shape, device=device)
        loss = torch.nn.functional.mse_loss(output, noise)
        loss.backward()
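
# Illustrative (hypothetical names): concrete test cases pair this mixin with
# unittest.TestCase and pin a reference output slice, roughly like this:
#
#   class DownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
#       block_class = DownBlock2D  # from diffusers.models.unet_2d_blocks
#       block_type = "down"
#
#       def test_output(self):
#           expected_slice = [...]  # nine reference values for output[0, -1, -3:, -3:]
#           super().test_output(expected_slice)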
| 295
| 1
|
from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
    # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}


class GPTNeoConfig(PretrainedConfig):
    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=50257,
        max_position_embeddings=2048,
        hidden_size=2048,
        num_layers=24,
        attention_types=[[["global", "local"], 12]],
        num_heads=16,
        intermediate_size=None,
        window_size=256,
        activation_function="gelu_new",
        resid_dropout=0.0,
        embed_dropout=0.0,
        attention_dropout=0.0,
        classifier_dropout=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)

        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument."
            )

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @staticmethod
    def expand_attention_types_params(attention_types):
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions


def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]

    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))

    return sliced.permute(perm)


def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """
    Custom implementation of GPTNeo's block-size computation to enable the export to ONNX: finds the largest divisor
    of `seq_length` below `window_size` using tensor ops instead of Python control flow.
    """
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")


class GPTNeoOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
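
# Quick equivalence check for custom_unfold, kept as comments (illustrative,
# and verified only for this small example):
#
#   import torch
#   x = torch.arange(10).view(1, 10)
#   assert torch.equal(custom_unfold(x, 1, size=4, step=2), x.unfold(1, 4, 2))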
| 295
|
from collections import defaultdict

from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst


def test_prim_successful_result():
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    # build an undirected adjacency list: each edge is inserted in both directions
    adjacency = defaultdict(list)
    for node1, node2, cost in edges:
        adjacency[node1].append([node2, cost])
        adjacency[node2].append([node1, cost])

    result = mst(adjacency)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
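
    # For reference: the eight expected edges form the minimum spanning tree of
    # the classic CLRS example graph, with total weight 1 + 2 + 2 + 4 + 4 + 7 + 8 + 9 = 37.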
| 295
| 1
|
import warnings

from ...utils import logging
from .image_processing_flava import FlavaImageProcessor


logger = logging.get_logger(__name__)


class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 295
|
from collections.abc import Sequence


def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Kadane's algorithm: best sum over all (optionally empty) contiguous subarrays."""
    if not arr:
        return 0

    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        # either extend the running subarray or start fresh at the current element
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)

    return max_sum


if __name__ == "__main__":
    from doctest import testmod

    testmod()

    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f"{max_subarray_sum(nums) = }")
| 295
| 1
|