Dataset schema:

| column | dtype | range |
|---|---|---|
| code | string | 81–54k chars |
| code_codestyle | int64 | 0–721 |
| style_context | string | 91–41.9k chars |
| style_context_codestyle | int64 | 0–699 |
| label | int64 | 0–1 |
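# A minimal sketch of how a row of this dataset might be inspected with the
# `datasets` library. The repo id below is a placeholder -- the viewer header
# above does not name the dataset -- so treat it as hypothetical.
from datasets import load_dataset

ds = load_dataset("username/code-style-pairs", split="train")  # hypothetical repo id
row = ds[0]
print(row["label"])                # 0 or 1
print(row["code"][:200])           # first 200 chars of the code sample
print(row["style_context"][:200])  # first 200 chars of the paired context

# ====== sample: resembles transformers' RoFormer configuration file ======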
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json",
"junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json",
"junnyu/roformer_chinese_char_small": (
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"
),
"junnyu/roformer_chinese_char_base": (
"https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"
),
"junnyu/roformer_small_discriminator": (
"https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"
),
"junnyu/roformer_small_generator": (
"https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class RoFormerConfig(PretrainedConfig):
    model_type = "roformer"

    def __init__(
        self,
        vocab_size=50000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache


class RoFormerOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
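# A minimal usage sketch, assuming a standard transformers install.
# `OnnxConfig.from_model_config` is the usual constructor for ONNX export
# configs; check the installed version's API before relying on it.
config = RoFormerConfig()
print(config.model_type)  # "roformer"
onnx_config = RoFormerOnnxConfig.from_model_config(config)
print(onnx_config.inputs)  # OrderedDict of per-input dynamic axes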
# ====== sample: greedy activity-selection algorithm ======
def print_max_activities(start: list[int], finish: list[int]) -> None:
    """
    Greedy activity selection: prints a maximum-size set of mutually
    compatible activities, assuming `finish` is sorted in ascending order.

    >>> start = [1, 3, 0, 5, 8, 5]
    >>> finish = [2, 4, 6, 7, 9, 9]
    >>> print_max_activities(start, finish)
    The following activities are selected:
    0,1,3,4,
    """
    n = len(finish)
    print("The following activities are selected:")
    # The first activity is always selected
    i = 0
    print(i, end=",")
    # Consider the rest of the activities
    for j in range(n):
        # If this activity starts at or after the finish time of the
        # previously selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
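# A small variant sketch (not in the original): the same greedy rule as a pure
# function that returns the selected indices instead of printing them, which
# is often easier to test and reuse.
def max_activities(start: list[int], finish: list[int]) -> list[int]:
    selected = [0]  # the first activity is always selected
    i = 0
    for j in range(1, len(finish)):
        if start[j] >= finish[i]:
            selected.append(j)
            i = j
    return selected


assert max_activities([1, 3, 0, 5, 8, 5], [2, 4, 6, 7, 9, 9]) == [0, 1, 3, 4]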
# ====== sample: transformers data-processor re-exports ======
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
# ====== sample: diffusers-style VAE building blocks (Encoder / Decoder / VectorQuantizer) ======
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
@dataclass
class DecoderOutput(BaseOutput):
    sample: torch.FloatTensor
class Encoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownEncoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        double_z=True,
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = torch.nn.Conv2d(in_channels, block_out_channels[0], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=self.layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                add_downsample=not is_final_block,
                resnet_eps=1e-6,
                downsample_padding=0,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=None,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default",
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=None,
        )

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()

        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, x):
        sample = x
        sample = self.conv_in(sample)

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False
                    )
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False
                )
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)
        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)
            # middle
            sample = self.mid_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)
        return sample
class Decoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        up_block_types=("UpDecoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        norm_type="group",  # "group" or "spatial"
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        temb_channels = in_channels if norm_type == "spatial" else None

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default" if norm_type == "group" else norm_type,
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=temb_channels,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=self.layers_per_block + 1,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                prev_output_channel=None,
                add_upsample=not is_final_block,
                resnet_eps=1e-6,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=temb_channels,
                resnet_time_scale_shift=norm_type,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, z, latent_embeds=None):
        sample = z
        sample = self.conv_in(sample)

        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False
                )
                sample = sample.to(upscale_dtype)
                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False
                    )
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds
                )
                sample = sample.to(upscale_dtype)
                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)
            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)

        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)
        return sample
class VectorQuantizer(nn.Module):
    def __init__(self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy

        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                f"Using {self.unknown_index} for unknown indices."
            )
        else:
            self.re_embed = n_e

        self.sane_index_shape = sane_index_shape

    def remap_to_used(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)

    def forward(self, z):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)

        # distances from z to embeddings e_j: (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)

        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None

        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)

        # preserve gradients (straight-through estimator)
        z_q = z + (z_q - z).detach()

        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten

        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def get_codebook_entry(self, indices, shape):
        # shape specifying (batch, height, width, channel)
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again

        # get quantized latent vectors
        z_q = self.embedding(indices)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q
class DiagonalGaussianDistribution(object):
    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype
            )

    def sample(self, generator: Optional[torch.Generator] = None) -> torch.FloatTensor:
        # make sure sample is on the same device as the parameters and has same dtype
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype
        )
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None):
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar,
                    dim=[1, 2, 3],
                )

    def nll(self, sample, dims=[1, 2, 3]):
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self):
        return self.mean
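# A minimal sketch of how the distribution above behaves, assuming an
# 8-channel "moments" tensor (mean and logvar stacked along dim 1) such as a
# VAE encoder would produce; shapes are illustrative only.
moments = torch.randn(1, 8, 4, 4)  # (batch, 2 * latent_channels, h, w)
posterior = DiagonalGaussianDistribution(moments)
latent = posterior.sample()  # reparameterized draw: mean + std * eps
print(latent.shape)          # torch.Size([1, 4, 4, 4])
print(posterior.kl().shape)  # per-sample KL against a standard normal: torch.Size([1])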
# ====== sample: Informer lazy-import module (transformers __init__.py pattern) ======
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_informer": [
        "INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_informer"] = [
        "INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InformerForPrediction",
        "InformerModel",
        "InformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_informer import (
            INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            InformerForPrediction,
            InformerModel,
            InformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
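# A hedged illustration of what the lazy-module pattern above buys:
# `import transformers` stays cheap because the torch-backed submodule is only
# imported when one of its attributes is first accessed. Illustrative only.
import transformers

config = transformers.InformerConfig()  # first access triggers the real import
print(type(config).__name__)  # "InformerConfig"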
# ====== sample: datasets summarization task template ======
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
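# A small usage sketch derived directly from the dataclass above; the column
# names "article"/"highlights" are hypothetical stand-ins for a real dataset.
template = Summarization(text_column="article", summary_column="highlights")
print(template.column_mapping)  # {'article': 'text', 'highlights': 'summary'}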
# ====== sample: Burrows-Wheeler transform ======
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int


def all_rotations(s: str) -> list[str]:
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    return [s[i:] + s[:i] for i in range(len(s))]


def bwt_transform(s: str) -> BWTTransformDict:
    """
    >>> bwt_transform("^BANANA")
    {'bwt_string': 'BNN^AAA', 'idx_original_string': 6}
    """
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")
    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetical order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response


def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    """
    >>> reverse_bwt("BNN^AAA", 6)
    '^BANANA'
    """
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError("The parameter idx_original_string type must be int or castable to int.")
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError("The parameter idx_original_string must be lower than len(bwt_string).")

    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]


if __name__ == "__main__":
    entry_msg = "Provide a string that I will generate its BWT transform: "
    s = input(entry_msg).strip()
    result = bwt_transform(s)
    print(
        f"Burrows Wheeler transform for string '{s}' results "
        f"in '{result['bwt_string']}'"
    )
    original_string = reverse_bwt(result["bwt_string"], result["idx_original_string"])
    print(
        f"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' "
        f"we get original string '{original_string}'"
    )
# ====== sample: approximate minimum vertex cover via matching ======
def matching_min_vertex_cover(graph: dict) -> set:
    """
    APX algorithm for minimum vertex cover via a maximal matching: each popped
    edge contributes both endpoints to the cover, giving a 2-approximation.
    """
    chosen_vertices = set()
    # edges = set of graph's edges
    edges = get_edges(graph)

    # While there are still elements in the edges set, take an arbitrary edge
    # (from_node, to_node), add both its extremities to chosen_vertices, and
    # then remove all edges adjacent to from_node and to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    """Return a set of (from_node, to_node) couples representing all edges."""
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
# ====== sample: RegNet model tests (transformers test-suite style) ======
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch
    from torch import nn

    from transformers import RegNetForImageClassification, RegNetModel
    from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor
class RegNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = RegNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = RegNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class RegNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification}
        if is_torch_available()
        else {}
    )

    # the obfuscated source only shows four boolean False flags; these names
    # are reconstructed from the transformers test-suite conventions
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = RegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return
    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = RegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class RegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.4180, -1.5051, -3.4836]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
# ====== sample: OwlViT lazy-import module ======
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)

_import_structure = {
    "configuration_owlvit": [
        "OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "OwlViTConfig",
        "OwlViTOnnxConfig",
        "OwlViTTextConfig",
        "OwlViTVisionConfig",
    ],
    "processing_owlvit": ["OwlViTProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_owlvit"] = ["OwlViTFeatureExtractor"]
    _import_structure["image_processing_owlvit"] = ["OwlViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_owlvit"] = [
        "OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OwlViTModel",
        "OwlViTPreTrainedModel",
        "OwlViTTextModel",
        "OwlViTVisionModel",
        "OwlViTForObjectDetection",
    ]

if TYPE_CHECKING:
    from .configuration_owlvit import (
        OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        OwlViTConfig,
        OwlViTOnnxConfig,
        OwlViTTextConfig,
        OwlViTVisionConfig,
    )
    from .processing_owlvit import OwlViTProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_owlvit import OwlViTFeatureExtractor
        from .image_processing_owlvit import OwlViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_owlvit import (
            OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OwlViTForObjectDetection,
            OwlViTModel,
            OwlViTPreTrainedModel,
            OwlViTTextModel,
            OwlViTVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# ====== sample: Reformer trax-to-PyTorch checkpoint conversion ======
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # set parameters of one torch layer from trax weights
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)


def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weights
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )


def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )


def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise the PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained Reformer model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
# ====== sample: Project Euler 174 (hollow square laminae) ======
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1_000_000, n_limit: int = 10) -> int:
    """
    Count the values t <= t_limit for which a square lamina (a square with a
    centered square hole) can be formed in n distinct ways, for 1 <= n <= n_limit.
    """
    count: defaultdict = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width * outer_width - t_limit)), 1)
        else:
            hole_width_lower_bound = 1
        # the hole width must have the same parity as the outer width
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)


if __name__ == "__main__":
    print(f"{solution() = }")
# ====== sample: linear-algebra library unit tests ======
import unittest
from .lib import (
    Matrix,
    Vector,
    axpy,
    square_zero_matrix,
    unit_basis_vector,
    zero_vector,
)


class Test(unittest.TestCase):
    def test_component(self) -> None:
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()

    def test_str(self) -> None:
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), "(0,0,0,0,0,1)")

    def test_size(self) -> None:
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)

    def test_euclidean_length(self) -> None:
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)

    def test_add(self) -> None:
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)

    def test_sub(self) -> None:
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)

    def test_mul(self) -> None:
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)")
        self.assertEqual((a * b), 0)

    def test_zero_vector(self) -> None:
        self.assertEqual(str(zero_vector(10)).count("0"), 10)

    def test_unit_basis_vector(self) -> None:
        self.assertEqual(str(unit_basis_vector(3, 1)), "(0,1,0)")

    def test_axpy(self) -> None:
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), "(3,4,7)")

    def test_copy(self) -> None:
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))

    def test_change_component(self) -> None:
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), "(0,1,0)")

    def test_str_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_minor(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))

    def test_cofactor(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))

    def test_determinant(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())

    def test_mul_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual("(14,32,50)", str(a * x))
        self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(a * 2))

    def test_change_component_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_component_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(7, a.component(2, 1), 0.01)

    def test_add_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(a + b))

    def test_sub_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(a - b))

    def test_square_zero_matrix(self) -> None:
        self.assertEqual(
            "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n",
            str(square_zero_matrix(5)),
        )


if __name__ == "__main__":
    unittest.main()
# ====== sample: open-addressing hash map ======
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar("KEY")
VAL = TypeVar("VAL")


@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL]):
    """Hash map with open addressing (linear probing) and lazy deletion."""

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = " ,".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
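# A quick usage sketch of the open-addressing map above; the behavior shown is
# a direct consequence of the code, not an external API.
hm = HashMap(initial_block_size=8)
for n in range(10):
    hm[n] = n * n  # triggers _size_up() once the load factor passes 0.75
print(hm[3])       # 9
del hm[3]
print(3 in hm)     # False -- __contains__ comes for free from MutableMapping
print(len(hm))     # 9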
# ====== sample: single-qubit measurement with Qiskit ======
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """Measure a single qubit on the Aer simulator and return the counts."""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1_000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    print(f"Total count for various states are: {single_qubit_measure(1, 1)}")
# ====== sample: prefix-sum data structure ======
class PrefixSum:
    def __init__(self, array: list[int]) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        """
        >>> PrefixSum([1, 2, 3]).get_sum(0, 2)
        6
        >>> PrefixSum([1, 2, 3]).get_sum(1, 2)
        5
        >>> PrefixSum([1, 2, 3]).get_sum(2, 2)
        3
        """
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        """
        >>> PrefixSum([1, 2, 3]).contains_sum(6)
        True
        >>> PrefixSum([1, 2, 3]).contains_sum(5)
        True
        >>> PrefixSum([1, 2, 3]).contains_sum(4)
        False
        """
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
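# A usage sketch for the class above: O(1) range sums after O(n) setup, plus
# the subarray-sum check (values worked out by hand).
ps = PrefixSum([1, 2, 3, 4])
print(ps.get_sum(0, 3))    # 10
print(ps.get_sum(1, 2))    # 5
print(ps.contains_sum(7))  # True  (the subarray 3 + 4)
print(ps.contains_sum(8))  # False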
# ====== sample: manim animation of sharded-checkpoint loading (accelerate docs style) ======
from manim import *
# direction constants (UP/RIGHT/DOWN) below are reconstructed from the
# accelerate manim animations this sample resembles
class Stage2(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        cpu_targs = []
        for i, rect in enumerate(model_base):
            rect.set_stroke(YELLOW)
            # target = fill.copy().set_fill(YELLOW, opacity=0.7)
            # target.move_to(rect)
            # self.add(target)
            cpu_target = Rectangle(height=0.46 / 4, width=0.46 / 3).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.set_x(cpu_target.get_x() + 0.1)
            elif i == 3:
                cpu_target.next_to(cpu_targs[0], direction=UP, buff=0.0)
            else:
                cpu_target.next_to(cpu_targs[i - 1], direction=RIGHT, buff=0.0)
            self.add(cpu_target)
            cpu_targs.append(cpu_target)

        checkpoint_base = [mem.copy() for i in range(6)]
        checkpoint_rect = VGroup(*checkpoint_base).arrange(RIGHT, buff=0)
        checkpoint_text = Text("Loaded Checkpoint", font_size=24)
        checkpoint = Group(checkpoint_rect, checkpoint_text).arrange(DOWN, aligned_edge=DOWN, buff=0.4)
        checkpoint.move_to([3, 0.5, 0])

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        self.add(key_text, key)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint",
            font_size=18,
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())

        step_2 = MarkupText(
            f'Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.',
            font_size=24,
        )
        step_2.move_to([2, 2, 0])
        self.play(Write(step_2), Write(blue_text))
        self.play(Write(checkpoint_text, run_time=1), Create(checkpoint_rect, run_time=1))

        first_animations = []
        second_animations = []
        for i, rect in enumerate(checkpoint_base):
            target = fill.copy().set_fill(BLUE, opacity=0.7)
            target.move_to(rect)
            first_animations.append(GrowFromCenter(target, run_time=1))
            cpu_target = target.copy()
            cpu_target.generate_target()
            if i < 5:
                cpu_target.target.move_to(cpu_left_col_base[i + 1])
            else:
                cpu_target.target.move_to(cpu_right_col_base[i - 5])
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))
        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
# ====== sample: fetching daily CI artifacts from GitHub Actions ======
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """Get the workflow runs of the scheduled (daily) CI."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()
    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Get the id of the last completed scheduled (daily) CI workflow run."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break
    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the requested artifacts of the last completed daily CI run."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # note: `worflow_run_id` is the parameter name used by `get_artifacts_links`
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Download and unzip the artifacts, returning their file contents."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")
    return results
| 51
| 0
|
"""simple docstring"""
def lowercase__ ( ) -> list[list[int]]:
"""simple docstring"""
return [list(range(1_000 - i ,-1_000 - i ,-1 ) ) for i in range(1_000 )]
lowerCamelCase__ = generate_large_matrix()
lowerCamelCase__ = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def lowercase__ ( lowercase_ ) -> None:
"""simple docstring"""
assert all(row == sorted(lowercase_ ,reverse=lowercase_ ) for row in grid )
assert all(list(lowercase_ ) == sorted(lowercase_ ,reverse=lowercase_ ) for col in zip(*lowercase_ ) )
def lowercase__ ( lowercase_ ) -> int:
"""simple docstring"""
_UpperCamelCase : Optional[Any] = 0
_UpperCamelCase : str = len(lowercase_ ) - 1
    # Edge cases: the array is empty, or every value is negative (the first negative index is then 0).
if not array or array[0] < 0:
return 0
while right + 1 > left:
_UpperCamelCase : Dict = (left + right) // 2
_UpperCamelCase : str = array[mid]
        # mid is the boundary exactly when its value is negative and the previous element is non-negative.
        if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
_UpperCamelCase : Optional[Any] = mid + 1
else:
_UpperCamelCase : List[Any] = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(lowercase_ )
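# Illustrative trace (hypothetical row, consistent with the descending order the
# assertions above enforce): for [4, 3, -1, -2] the first negative value sits at
# index 2, so the search returns 2 and that row contributes len(row) - 2 == 2
# negatives to the overall count.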
def lowercase__ ( lowercase_ ) -> int:
"""simple docstring"""
_UpperCamelCase : Any = 0
_UpperCamelCase : List[str] = len(grid[0] )
for i in range(len(lowercase_ ) ):
_UpperCamelCase : List[str] = find_negative_index(grid[i][:bound] )
total += bound
return (len(lowercase_ ) * len(grid[0] )) - total
def lowercase__ ( lowercase_ ) -> int:
"""simple docstring"""
return len([number for row in grid for number in row if number < 0] )
def lowercase__ ( lowercase_ ) -> int:
"""simple docstring"""
_UpperCamelCase : List[Any] = 0
for row in grid:
for i, number in enumerate(lowercase_ ):
if number < 0:
total += len(lowercase_ ) - i
break
return total
def lowercase__ ( ) -> None:
"""simple docstring"""
from timeit import timeit
print("Running benchmarks" )
_UpperCamelCase : List[str] = (
"from __main__ import count_negatives_binary_search, "
"count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
_UpperCamelCase : List[str] = timeit(F'''{func}(grid=grid)''' ,setup=lowercase_ ,number=500 )
print(F'''{func}() took {time:0.4f} seconds''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 718
|
"""simple docstring"""
import math
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Any , __a : list[list[float]] , __a : list[int] ) -> int:
_UpperCamelCase : List[Any] = 0.0
_UpperCamelCase : Union[str, Any] = 0.0
        for i in range(len(__a ) ):
            da += math.pow((sample[i] - weights[0][i]) , 2 )
            db += math.pow((sample[i] - weights[1][i]) , 2 )
        return 0 if da > db else 1
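    # Worked example with the training data from main() below: sample [1, 1, 0, 0]
    # against the initial weights gives
    # da = 0.64 + 0.16 + 0.25 + 0.81 = 1.86 and db = 0.04 + 0.36 + 0.49 + 0.09 = 0.98,
    # so da > db and cluster 1 wins.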
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : list[list[int | float]] , __a : list[int] , __a : int , __a : float ) -> list[list[int | float]]:
for i in range(len(__a ) ):
weights[j][i] += alpha * (sample[i] - weights[j][i])
return weights
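# Worked update (assumed values): with alpha = 0.5, sample[i] = 1.0 and
# weights[j][i] = 0.2, the winning weight moves to 0.2 + 0.5 * (1.0 - 0.2) = 0.6,
# i.e. halfway toward the sample.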
def lowercase__ ( ) -> None:
"""simple docstring"""
_UpperCamelCase : Optional[int] = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
# weight initialization ( n, C )
_UpperCamelCase : List[str] = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
# training
_UpperCamelCase : List[Any] = SelfOrganizingMap()
_UpperCamelCase : int = 3
_UpperCamelCase : List[Any] = 0.5
for _ in range(lowercase_ ):
for j in range(len(lowercase_ ) ):
# training sample
_UpperCamelCase : int = training_samples[j]
# Compute the winning vector
_UpperCamelCase : Tuple = self_organizing_map.get_winner(lowercase_ ,lowercase_ )
# Update the winning vector
_UpperCamelCase : int = self_organizing_map.update(lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ )
# classify test sample
_UpperCamelCase : Optional[int] = [0, 0, 0, 1]
_UpperCamelCase : Dict = self_organizing_map.get_winner(lowercase_ ,lowercase_ )
# results
print(F'''Clusters that the test sample belongs to : {winner}''' )
print(F'''Weights that have been trained : {weights}''' )
# running the main() function
if __name__ == "__main__":
main()
| 51
| 0
|
"""simple docstring"""
from __future__ import annotations
def lowercase__ ( lowercase_ ) -> float:
"""simple docstring"""
_UpperCamelCase : str = 0.00
_UpperCamelCase : int = 0
for resistor in resistors:
if resistor <= 0:
_UpperCamelCase : str = F'''Resistor at index {index} has a negative or zero value!'''
raise ValueError(lowercase_ )
first_sum += 1 / float(lowercase_ )
index += 1
return 1 / first_sum
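# Worked example (assumed values): resistors of 2 and 6 ohms in parallel give
# 1 / (1/2 + 1/6) = 1 / (2/3) = 1.5 ohms, which is what the loop above computes.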
def lowercase__ ( lowercase_ ) -> float:
"""simple docstring"""
_UpperCamelCase : str = 0.00
_UpperCamelCase : Union[str, Any] = 0
for resistor in resistors:
sum_r += resistor
if resistor < 0:
_UpperCamelCase : int = F'''Resistor at index {index} has a negative value!'''
raise ValueError(lowercase_ )
index += 1
return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
| 719
|
"""simple docstring"""
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
lowerCamelCase__ = "src/transformers"
lowerCamelCase__ = "docs/source/en"
lowerCamelCase__ = "."
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> List[str]:
"""simple docstring"""
with open(lowercase_ ,"r" ,encoding="utf-8" ,newline="\n" ) as f:
_UpperCamelCase : Union[str, Any] = f.readlines()
# Find the start prompt.
_UpperCamelCase : Dict = 0
while not lines[start_index].startswith(lowercase_ ):
start_index += 1
start_index += 1
_UpperCamelCase : Optional[int] = start_index
while not lines[end_index].startswith(lowercase_ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
lowerCamelCase__ = "Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names.
lowerCamelCase__ = re.compile(R"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
lowerCamelCase__ = re.compile(R"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
lowerCamelCase__ = re.compile(R"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# This is to make sure the transformers module imported is the one in the repo.
lowerCamelCase__ = direct_transformers_import(TRANSFORMERS_PATH)
def lowercase__ ( lowercase_ ) -> Any:
"""simple docstring"""
_UpperCamelCase : Tuple = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)" ,lowercase_ )
return [m.group(0 ) for m in matches]
def lowercase__ ( lowercase_ ,lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase : str = 2 if text == "✅" or text == "❌" else len(lowercase_ )
_UpperCamelCase : Union[str, Any] = (width - text_length) // 2
_UpperCamelCase : Dict = width - text_length - left_indent
return " " * left_indent + text + " " * right_indent
def lowercase__ ( ) -> str:
"""simple docstring"""
_UpperCamelCase : Optional[Any] = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
_UpperCamelCase : str = {
name: config_maping_names[code]
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if code in config_maping_names
}
_UpperCamelCase : Dict = {name: config.replace("Config" ,"" ) for name, config in model_name_to_config.items()}
# Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
_UpperCamelCase : int = collections.defaultdict(lowercase_ )
_UpperCamelCase : Dict = collections.defaultdict(lowercase_ )
_UpperCamelCase : Dict = collections.defaultdict(lowercase_ )
_UpperCamelCase : int = collections.defaultdict(lowercase_ )
_UpperCamelCase : str = collections.defaultdict(lowercase_ )
# Let's lookup through all transformers object (once).
for attr_name in dir(lowercase_ ):
_UpperCamelCase : List[str] = None
if attr_name.endswith("Tokenizer" ):
_UpperCamelCase : Tuple = slow_tokenizers
_UpperCamelCase : Any = attr_name[:-9]
elif attr_name.endswith("TokenizerFast" ):
_UpperCamelCase : Optional[Any] = fast_tokenizers
_UpperCamelCase : List[str] = attr_name[:-13]
elif _re_tf_models.match(lowercase_ ) is not None:
_UpperCamelCase : List[Any] = tf_models
_UpperCamelCase : Dict = _re_tf_models.match(lowercase_ ).groups()[0]
elif _re_flax_models.match(lowercase_ ) is not None:
_UpperCamelCase : Dict = flax_models
_UpperCamelCase : Union[str, Any] = _re_flax_models.match(lowercase_ ).groups()[0]
elif _re_pt_models.match(lowercase_ ) is not None:
_UpperCamelCase : Optional[int] = pt_models
_UpperCamelCase : Any = _re_pt_models.match(lowercase_ ).groups()[0]
if lookup_dict is not None:
while len(lowercase_ ) > 0:
if attr_name in model_name_to_prefix.values():
_UpperCamelCase : Dict = True
break
# Try again after removing the last word in the name
_UpperCamelCase : List[str] = "".join(camel_case_split(lowercase_ )[:-1] )
# Let's build that table!
_UpperCamelCase : Any = list(model_name_to_config.keys() )
model_names.sort(key=str.lower )
_UpperCamelCase : List[str] = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
# We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
_UpperCamelCase : Union[str, Any] = [len(lowercase_ ) + 2 for c in columns]
_UpperCamelCase : Any = max([len(lowercase_ ) for name in model_names] ) + 2
# Build the table per se
_UpperCamelCase : Tuple = "|" + "|".join([_center_text(lowercase_ ,lowercase_ ) for c, w in zip(lowercase_ ,lowercase_ )] ) + "|\n"
# Use ":-----:" format to center-aligned table cell texts
table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths] ) + "|\n"
_UpperCamelCase : Union[str, Any] = {True: "✅", False: "❌"}
for name in model_names:
_UpperCamelCase : Optional[int] = model_name_to_prefix[name]
_UpperCamelCase : Tuple = [
name,
check[slow_tokenizers[prefix]],
check[fast_tokenizers[prefix]],
check[pt_models[prefix]],
check[tf_models[prefix]],
check[flax_models[prefix]],
]
table += "|" + "|".join([_center_text(lowercase_ ,lowercase_ ) for l, w in zip(lowercase_ ,lowercase_ )] ) + "|\n"
return table
def lowercase__ ( lowercase_=False ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : str = _find_text_in_file(
filename=os.path.join(lowercase_ ,"index.md" ) ,start_prompt="<!--This table is updated automatically from the auto modules" ,end_prompt="<!-- End table-->" ,)
_UpperCamelCase : Any = get_model_table_from_auto_modules()
if current_table != new_table:
if overwrite:
with open(os.path.join(lowercase_ ,"index.md" ) ,"w" ,encoding="utf-8" ,newline="\n" ) as f:
f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
else:
raise ValueError(
"The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this." )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
lowerCamelCase__ = parser.parse_args()
check_model_table(args.fix_and_overwrite)
| 51
| 0
|
"""simple docstring"""
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Optional[int] = (UnCLIPScheduler,)
def __SCREAMING_SNAKE_CASE ( self : str , **__a : Tuple ) -> Tuple:
_UpperCamelCase : Union[str, Any] = {
"num_train_timesteps": 1000,
"variance_type": "fixed_small_log",
"clip_sample": True,
"clip_sample_range": 1.0,
"prediction_type": "epsilon",
}
config.update(**__a )
return config
def __SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=__a )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict:
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=__a )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__a )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Any:
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=__a )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]:
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=__a , prev_timestep=__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
_UpperCamelCase : Optional[Any] = self.scheduler_classes[0]
_UpperCamelCase : Union[str, Any] = self.get_scheduler_config(variance_type="fixed_small_log" )
_UpperCamelCase : int = scheduler_class(**__a )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0_0_0_0e-1_0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_54_96_25 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_99_49_87 ) ) < 1e-5
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
_UpperCamelCase : Optional[Any] = self.scheduler_classes[0]
_UpperCamelCase : List[str] = self.get_scheduler_config(variance_type="learned_range" )
_UpperCamelCase : Dict = scheduler_class(**__a )
_UpperCamelCase : Any = 0.5
assert scheduler._get_variance(1 , predicted_variance=__a ) - -10.1_71_27_90 < 1e-5
assert scheduler._get_variance(487 , predicted_variance=__a ) - -5.7_99_80_52 < 1e-5
assert scheduler._get_variance(999 , predicted_variance=__a ) - -0.0_01_00_11 < 1e-5
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]:
_UpperCamelCase : List[Any] = self.scheduler_classes[0]
_UpperCamelCase : Union[str, Any] = self.get_scheduler_config()
_UpperCamelCase : Optional[Any] = scheduler_class(**__a )
_UpperCamelCase : List[Any] = scheduler.timesteps
_UpperCamelCase : str = self.dummy_model()
_UpperCamelCase : List[Any] = self.dummy_sample_deter
_UpperCamelCase : List[str] = torch.manual_seed(0 )
for i, t in enumerate(__a ):
# 1. predict noise residual
_UpperCamelCase : int = model(__a , __a )
# 2. predict previous mean of sample x_t-1
_UpperCamelCase : Union[str, Any] = scheduler.step(__a , __a , __a , generator=__a ).prev_sample
_UpperCamelCase : Dict = pred_prev_sample
_UpperCamelCase : Optional[int] = torch.sum(torch.abs(__a ) )
_UpperCamelCase : Dict = torch.mean(torch.abs(__a ) )
assert abs(result_sum.item() - 252.268_2495 ) < 1e-2
assert abs(result_mean.item() - 0.3_28_47_43 ) < 1e-3
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
_UpperCamelCase : Optional[int] = self.scheduler_classes[0]
_UpperCamelCase : Union[str, Any] = self.get_scheduler_config()
_UpperCamelCase : Any = scheduler_class(**__a )
scheduler.set_timesteps(25 )
_UpperCamelCase : Tuple = scheduler.timesteps
_UpperCamelCase : Optional[Any] = self.dummy_model()
_UpperCamelCase : int = self.dummy_sample_deter
_UpperCamelCase : Optional[Any] = torch.manual_seed(0 )
for i, t in enumerate(__a ):
# 1. predict noise residual
_UpperCamelCase : List[str] = model(__a , __a )
if i + 1 == timesteps.shape[0]:
_UpperCamelCase : List[Any] = None
else:
_UpperCamelCase : int = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
_UpperCamelCase : List[str] = scheduler.step(
__a , __a , __a , prev_timestep=__a , generator=__a ).prev_sample
_UpperCamelCase : Union[str, Any] = pred_prev_sample
_UpperCamelCase : List[Any] = torch.sum(torch.abs(__a ) )
_UpperCamelCase : Dict = torch.mean(torch.abs(__a ) )
assert abs(result_sum.item() - 258.204_4983 ) < 1e-2
assert abs(result_mean.item() - 0.3_36_20_38 ) < 1e-3
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
pass
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict:
pass
| 720
|
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
lowerCamelCase__ = 200
# Number of elements selected in every generation of evolution. The selection is
# done from best to worst of that generation, and this count must be smaller than N_POPULATION.
lowerCamelCase__ = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
lowerCamelCase__ = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def lowercase__ ( lowercase_ ,lowercase_ ) -> tuple[str, float]:
"""simple docstring"""
_UpperCamelCase : str = len([g for position, g in enumerate(lowercase_ ) if g == main_target[position]] )
return (item, float(lowercase_ ))
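# Example: scoring "hallo" against the target "hello" yields 4.0, one point per
# position where the gene already matches the target.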
def lowercase__ ( lowercase_ ,lowercase_ ) -> tuple[str, str]:
"""simple docstring"""
_UpperCamelCase : Tuple = random.randint(0 ,len(lowercase_ ) - 1 )
    _UpperCamelCase : Dict = parent_a[:random_slice] + parent_b[random_slice:]
    _UpperCamelCase : Tuple = parent_b[:random_slice] + parent_a[random_slice:]
    return (child_a, child_b)
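# Example with a slice point of 2 (chosen at random above): crossing "abcd" with
# "wxyz" yields ("abyz", "wxcd"); each child keeps one parent's prefix and the
# other parent's suffix.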
def lowercase__ ( lowercase_ ,lowercase_ ) -> str:
"""simple docstring"""
_UpperCamelCase : int = list(lowercase_ )
if random.uniform(0 ,1 ) < MUTATION_PROBABILITY:
_UpperCamelCase : int = random.choice(lowercase_ )
return "".join(lowercase_ )
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,) -> list[str]:
"""simple docstring"""
_UpperCamelCase : Optional[Any] = []
# Generate more children proportionally to the fitness score.
_UpperCamelCase : List[str] = int(parent_a[1] * 100 ) + 1
_UpperCamelCase : Union[str, Any] = 10 if child_n >= 10 else child_n
for _ in range(lowercase_ ):
_UpperCamelCase : Dict = population_score[random.randint(0 ,lowercase_ )][0]
_UpperCamelCase, _UpperCamelCase : Dict = crossover(parent_a[0] ,lowercase_ )
# Append new string to the population list.
pop.append(mutate(lowercase_ ,lowercase_ ) )
pop.append(mutate(lowercase_ ,lowercase_ ) )
return pop
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ = True ) -> tuple[int, int, str]:
"""simple docstring"""
if N_POPULATION < N_SELECTED:
_UpperCamelCase : List[str] = F'''{N_POPULATION} must be bigger than {N_SELECTED}'''
raise ValueError(lowercase_ )
    # Verify that the target contains no genes besides the ones in the genes list.
_UpperCamelCase : int = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
_UpperCamelCase : int = F'''{not_in_genes_list} is not in genes list, evolution cannot converge'''
raise ValueError(lowercase_ )
# Generate random starting population.
_UpperCamelCase : Union[str, Any] = []
for _ in range(lowercase_ ):
population.append("".join([random.choice(lowercase_ ) for i in range(len(lowercase_ ) )] ) )
    # Just some logs to know what the algorithm is doing.
_UpperCamelCase, _UpperCamelCase : str = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(lowercase_ )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
_UpperCamelCase : int = [evaluate(lowercase_ ,lowercase_ ) for item in population]
# Check if there is a matching evolution.
_UpperCamelCase : Optional[Any] = sorted(lowercase_ ,key=lambda lowercase_ : x[1] ,reverse=lowercase_ )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generations.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F'''\nGeneration: {generation}'''
F'''\nTotal Population:{total_population}'''
F'''\nBest score: {population_score[0][1]}'''
F'''\nBest string: {population_score[0][0]}''' )
# Flush the old population, keeping some of the best evolutions.
        # Keeping these avoids regression of the evolution.
_UpperCamelCase : str = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(lowercase_ )
# Normalize population score to be between 0 and 1.
_UpperCamelCase : str = [
(item, score / len(lowercase_ )) for item, score in population_score
]
        # This is the selection step.
for i in range(lowercase_ ):
population.extend(select(population_score[int(lowercase_ )] ,lowercase_ ,lowercase_ ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
        # far fewer generations.
if len(lowercase_ ) > N_POPULATION:
break
if __name__ == "__main__":
lowerCamelCase__ = (
"This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
)
lowerCamelCase__ = list(
" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
"nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
)
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = basic(target_str, genes_list)
print(
f"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"""
)
| 51
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Optional[Any] = "convnextv2"
def __init__( self : List[Any] , __a : Optional[int]=3 , __a : str=4 , __a : Tuple=4 , __a : Dict=None , __a : Union[str, Any]=None , __a : List[Any]="gelu" , __a : Tuple=0.02 , __a : List[str]=1e-1_2 , __a : Optional[int]=0.0 , __a : int=224 , __a : List[Any]=None , __a : Tuple=None , **__a : Tuple , ) -> Optional[int]:
super().__init__(**__a )
_UpperCamelCase : int = num_channels
_UpperCamelCase : int = patch_size
_UpperCamelCase : int = num_stages
_UpperCamelCase : Tuple = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
_UpperCamelCase : Any = [3, 3, 9, 3] if depths is None else depths
_UpperCamelCase : Any = hidden_act
_UpperCamelCase : str = initializer_range
_UpperCamelCase : Any = layer_norm_eps
_UpperCamelCase : List[Any] = drop_path_rate
_UpperCamelCase : Tuple = image_size
_UpperCamelCase : Optional[int] = ["stem"] + [F'''stage{idx}''' for idx in range(1 , len(self.depths ) + 1 )]
_UpperCamelCase : str = get_aligned_output_features_output_indices(
out_features=__a , out_indices=__a , stage_names=self.stage_names )
| 721
|
"""simple docstring"""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = ["model.decoder.embed_positions.weights"]
def lowercase__ ( lowercase_ ) -> Optional[Any]:
"""simple docstring"""
if "emb" in name:
_UpperCamelCase : List[str] = name.replace("emb" ,"model.decoder.embed_tokens" )
if "transformer" in name:
_UpperCamelCase : Optional[int] = name.replace("transformer" ,"model.decoder" )
if "cross_attention" in name:
_UpperCamelCase : Optional[int] = name.replace("cross_attention" ,"encoder_attn" )
if "linear1" in name:
_UpperCamelCase : Optional[Any] = name.replace("linear1" ,"fc1" )
if "linear2" in name:
_UpperCamelCase : Union[str, Any] = name.replace("linear2" ,"fc2" )
if "norm1" in name:
_UpperCamelCase : Optional[Any] = name.replace("norm1" ,"self_attn_layer_norm" )
if "norm_cross" in name:
_UpperCamelCase : Dict = name.replace("norm_cross" ,"encoder_attn_layer_norm" )
if "norm2" in name:
_UpperCamelCase : Union[str, Any] = name.replace("norm2" ,"final_layer_norm" )
if "out_norm" in name:
_UpperCamelCase : Union[str, Any] = name.replace("out_norm" ,"model.decoder.layer_norm" )
if "linears" in name:
_UpperCamelCase : List[str] = name.replace("linears" ,"lm_heads" )
if "condition_provider.conditioners.description.output_proj" in name:
_UpperCamelCase : Any = name.replace("condition_provider.conditioners.description.output_proj" ,"enc_to_dec_proj" )
return name
def lowercase__ ( lowercase_ ,lowercase_ ) -> Tuple[Dict, Dict]:
"""simple docstring"""
_UpperCamelCase : str = list(state_dict.keys() )
_UpperCamelCase : Optional[Any] = {}
for key in keys:
_UpperCamelCase : Optional[int] = state_dict.pop(lowercase_ )
_UpperCamelCase : List[Any] = rename_keys(lowercase_ )
if "in_proj_weight" in key:
# split fused qkv proj
_UpperCamelCase : Tuple = val[:hidden_size, :]
_UpperCamelCase : Optional[Any] = val[hidden_size : 2 * hidden_size, :]
_UpperCamelCase : Optional[Any] = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
_UpperCamelCase : Optional[Any] = val
else:
_UpperCamelCase : List[str] = val
return state_dict, enc_dec_proj_state_dict
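# Shape sketch (assumed hidden_size = 1024, as for the "small" checkpoint below):
# a fused in_proj_weight of shape (3 * 1024, 1024) is sliced into q/k/v
# projection weights of shape (1024, 1024) each by the three slices above.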
def lowercase__ ( lowercase_ ) -> MusicgenDecoderConfig:
"""simple docstring"""
if checkpoint == "small":
# default config values
_UpperCamelCase : List[Any] = 1_024
_UpperCamelCase : List[str] = 24
_UpperCamelCase : Any = 16
elif checkpoint == "medium":
_UpperCamelCase : Tuple = 1_536
_UpperCamelCase : Dict = 48
_UpperCamelCase : Tuple = 24
elif checkpoint == "large":
_UpperCamelCase : int = 2_048
_UpperCamelCase : Optional[int] = 48
_UpperCamelCase : Dict = 32
else:
raise ValueError(F'''Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.''' )
_UpperCamelCase : str = MusicgenDecoderConfig(
hidden_size=lowercase_ ,ffn_dim=hidden_size * 4 ,num_hidden_layers=lowercase_ ,num_attention_heads=lowercase_ ,)
return config
@torch.no_grad()
def lowercase__ ( lowercase_ ,lowercase_=None ,lowercase_=None ,lowercase_="cpu" ) -> List[str]:
"""simple docstring"""
_UpperCamelCase : str = MusicGen.get_pretrained(lowercase_ ,device=lowercase_ )
_UpperCamelCase : Union[str, Any] = decoder_config_from_checkpoint(lowercase_ )
_UpperCamelCase : Optional[int] = fairseq_model.lm.state_dict()
_UpperCamelCase, _UpperCamelCase : Optional[Any] = rename_state_dict(
lowercase_ ,hidden_size=decoder_config.hidden_size )
_UpperCamelCase : Tuple = TaEncoderModel.from_pretrained("t5-base" )
_UpperCamelCase : Union[str, Any] = EncodecModel.from_pretrained("facebook/encodec_32khz" )
_UpperCamelCase : str = MusicgenForCausalLM(lowercase_ ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
_UpperCamelCase, _UpperCamelCase : str = decoder.load_state_dict(lowercase_ ,strict=lowercase_ )
for key in missing_keys.copy():
if key.startswith(("text_encoder", "audio_encoder") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(lowercase_ )
if len(lowercase_ ) > 0:
raise ValueError(F'''Missing key(s) in state_dict: {missing_keys}''' )
if len(lowercase_ ) > 0:
raise ValueError(F'''Unexpected key(s) in state_dict: {unexpected_keys}''' )
# init the composite model
_UpperCamelCase : str = MusicgenForConditionalGeneration(text_encoder=lowercase_ ,audio_encoder=lowercase_ ,decoder=lowercase_ )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(lowercase_ )
# check we can do a forward pass
_UpperCamelCase : List[str] = torch.arange(0 ,8 ,dtype=torch.long ).reshape(2 ,-1 )
_UpperCamelCase : Dict = input_ids.reshape(2 * 4 ,-1 )
with torch.no_grad():
_UpperCamelCase : Tuple = model(input_ids=lowercase_ ,decoder_input_ids=lowercase_ ).logits
if logits.shape != (8, 1, 2_048):
raise ValueError("Incorrect shape for logits" )
# now construct the processor
_UpperCamelCase : int = AutoTokenizer.from_pretrained("t5-base" )
_UpperCamelCase : str = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz" ,padding_side="left" )
_UpperCamelCase : Optional[int] = MusicgenProcessor(feature_extractor=lowercase_ ,tokenizer=lowercase_ )
# set the appropriate bos/pad token ids
_UpperCamelCase : str = 2_048
_UpperCamelCase : str = 2_048
# set other default generation config params
_UpperCamelCase : Optional[Any] = int(30 * audio_encoder.config.frame_rate )
_UpperCamelCase : List[str] = True
_UpperCamelCase : int = 3.0
if pytorch_dump_folder is not None:
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
logger.info(F'''Saving model {checkpoint} to {pytorch_dump_folder}''' )
model.save_pretrained(lowercase_ )
processor.save_pretrained(lowercase_ )
if repo_id:
logger.info(F'''Pushing model {checkpoint} to {repo_id}''' )
model.push_to_hub(lowercase_ )
processor.push_to_hub(lowercase_ )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
lowerCamelCase__ = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 51
| 0
|
"""simple docstring"""
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
lowerCamelCase__ = {"UserAgent": UserAgent().random}
def lowercase__ ( lowercase_ ) -> dict:
"""simple docstring"""
_UpperCamelCase : int = script.contents[0]
_UpperCamelCase : List[str] = json.loads(data[data.find("{\"config\"" ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : List[str] , __a : int ) -> Union[str, Any]:
_UpperCamelCase : Union[str, Any] = F'''https://www.instagram.com/{username}/'''
_UpperCamelCase : Dict = self.get_json()
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> dict:
_UpperCamelCase : Optional[Any] = requests.get(self.url , headers=__a ).text
_UpperCamelCase : str = BeautifulSoup(__a , "html.parser" ).find_all("script" )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self : Tuple ) -> str:
return F'''{self.__class__.__name__}(\'{self.username}\')'''
def __str__( self : str ) -> str:
return F'''{self.fullname} ({self.username}) is {self.biography}'''
@property
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
return self.user_data["username"]
@property
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str:
return self.user_data["full_name"]
@property
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str:
return self.user_data["biography"]
@property
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
return self.user_data["business_email"]
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str:
return self.user_data["external_url"]
@property
def __SCREAMING_SNAKE_CASE ( self : Any ) -> int:
return self.user_data["edge_followed_by"]["count"]
@property
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
return self.user_data["edge_follow"]["count"]
@property
def __SCREAMING_SNAKE_CASE ( self : str ) -> int:
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
return self.user_data["profile_pic_url_hd"]
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> bool:
return self.user_data["is_verified"]
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> bool:
return self.user_data["is_private"]
def lowercase__ ( lowercase_ = "github" ) -> None:
"""simple docstring"""
import os
if os.environ.get("CI" ):
return # test failing on GitHub Actions
_UpperCamelCase : Optional[Any] = InstagramUser(lowercase_ )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data ,lowercase_ )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120_000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("https://instagram." )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase__ = InstagramUser("github")
print(instagram_user)
print(f"""{instagram_user.number_of_posts = }""")
print(f"""{instagram_user.number_of_followers = }""")
print(f"""{instagram_user.number_of_followings = }""")
print(f"""{instagram_user.email = }""")
print(f"""{instagram_user.website = }""")
print(f"""{instagram_user.profile_picture_url = }""")
print(f"""{instagram_user.is_verified = }""")
print(f"""{instagram_user.is_private = }""")
| 700
|
"""simple docstring"""
from datetime import datetime
import requests
from bsa import BeautifulSoup
if __name__ == "__main__":
lowerCamelCase__ = input("Enter image url: ").strip()
print(f"""Downloading image from {url} ...""")
lowerCamelCase__ = BeautifulSoup(requests.get(url).content, "html.parser")
# The image URL is in the content field of the first meta tag with property og:image
lowerCamelCase__ = soup.find("meta", {"property": "og:image"})["content"]
lowerCamelCase__ = requests.get(image_url).content
lowerCamelCase__ = f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"""
with open(file_name, "wb") as fp:
fp.write(image_data)
print(f"""Done. Image saved to disk as {file_name}.""")
| 51
| 0
|
"""simple docstring"""
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
_UpperCamelCase : int = inspect.getfile(accelerate.test_utils )
_UpperCamelCase : List[str] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_script.py"] )
_UpperCamelCase : str = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
@require_tpu
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Any:
_UpperCamelCase : int = F'''
{self.test_dir}/xla_spawn.py
--num_cores 8
{self.test_file_path}
'''.split()
_UpperCamelCase : Optional[int] = [sys.executable] + distributed_args
execute_subprocess_async(__a , env=os.environ.copy() )
| 701
|
"""simple docstring"""
import importlib
import os
import sys
# This is required to make the module import work (when the python process is running from the root of the repo)
sys.path.append(".")
def lowercase__ ( lowercase_ ) -> int:
"""simple docstring"""
_UpperCamelCase : Any = test_file.split(os.path.sep )
if components[0:2] != ["tests", "models"]:
raise ValueError(
"`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
F'''{test_file} instead.''' )
_UpperCamelCase : str = components[-1]
if not test_fn.endswith("py" ):
raise ValueError(F'''`test_file` should be a python file. Got {test_fn} instead.''' )
if not test_fn.startswith("test_modeling_" ):
raise ValueError(
F'''`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.''' )
_UpperCamelCase : Dict = components[:-1] + [test_fn.replace(".py" ,"" )]
_UpperCamelCase : List[str] = ".".join(lowercase_ )
return test_module_path
def lowercase__ ( lowercase_ ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase : Optional[Any] = get_module_path(lowercase_ )
_UpperCamelCase : str = importlib.import_module(lowercase_ )
return test_module
def lowercase__ ( lowercase_ ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase : Union[str, Any] = []
_UpperCamelCase : List[Any] = get_test_module(lowercase_ )
for attr in dir(lowercase_ ):
if attr.endswith("ModelTester" ):
tester_classes.append(getattr(lowercase_ ,lowercase_ ) )
# sort with class names
return sorted(lowercase_ ,key=lambda lowercase_ : x.__name__ )
def lowercase__ ( lowercase_ ) -> Tuple:
"""simple docstring"""
_UpperCamelCase : Optional[Any] = []
_UpperCamelCase : Any = get_test_module(lowercase_ )
for attr in dir(lowercase_ ):
_UpperCamelCase : int = getattr(lowercase_ ,lowercase_ )
# (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
# `all_model_classes` is not empty (which also excludes other special classes).
_UpperCamelCase : Optional[Any] = getattr(lowercase_ ,"all_model_classes" ,[] )
if len(lowercase_ ) > 0:
test_classes.append(lowercase_ )
# sort with class names
return sorted(lowercase_ ,key=lambda lowercase_ : x.__name__ )
def lowercase__ ( lowercase_ ) -> Any:
"""simple docstring"""
_UpperCamelCase : Dict = get_test_classes(lowercase_ )
_UpperCamelCase : int = set()
for test_class in test_classes:
model_classes.update(test_class.all_model_classes )
# sort with class names
return sorted(lowercase_ ,key=lambda lowercase_ : x.__name__ )
def lowercase__ ( lowercase_ ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase : List[str] = test_class()
if hasattr(lowercase_ ,"setUp" ):
test.setUp()
_UpperCamelCase : Tuple = None
if hasattr(lowercase_ ,"model_tester" ):
# `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
if test.model_tester is not None:
_UpperCamelCase : Tuple = test.model_tester.__class__
return model_tester
def lowercase__ ( lowercase_ ,lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase : str = get_test_classes(lowercase_ )
_UpperCamelCase : Dict = []
for test_class in test_classes:
if model_class in test_class.all_model_classes:
target_test_classes.append(lowercase_ )
# sort with class names
return sorted(lowercase_ ,key=lambda lowercase_ : x.__name__ )
def lowercase__ ( lowercase_ ,lowercase_ ) -> Dict:
"""simple docstring"""
_UpperCamelCase : Any = get_test_classes_for_model(lowercase_ ,lowercase_ )
_UpperCamelCase : List[Any] = []
for test_class in test_classes:
_UpperCamelCase : List[Any] = get_model_tester_from_test_class(lowercase_ )
if tester_class is not None:
tester_classes.append(lowercase_ )
# sort with class names
return sorted(lowercase_ ,key=lambda lowercase_ : x.__name__ )
def lowercase__ ( lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase : Any = get_test_classes(lowercase_ )
_UpperCamelCase : Tuple = {test_class: get_model_tester_from_test_class(lowercase_ ) for test_class in test_classes}
return test_tester_mapping
def lowercase__ ( lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase : List[Any] = get_model_classes(lowercase_ )
_UpperCamelCase : Optional[int] = {
model_class: get_test_classes_for_model(lowercase_ ,lowercase_ ) for model_class in model_classes
}
return model_test_mapping
def lowercase__ ( lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase : Optional[Any] = get_model_classes(lowercase_ )
_UpperCamelCase : Tuple = {
model_class: get_tester_classes_for_model(lowercase_ ,lowercase_ ) for model_class in model_classes
}
return model_to_tester_mapping
def lowercase__ ( lowercase_ ) -> Optional[int]:
"""simple docstring"""
if isinstance(lowercase_ ,lowercase_ ):
return o
elif isinstance(lowercase_ ,lowercase_ ):
return o.__name__
elif isinstance(lowercase_ ,(list, tuple) ):
return [to_json(lowercase_ ) for x in o]
elif isinstance(lowercase_ ,lowercase_ ):
return {to_json(lowercase_ ): to_json(lowercase_ ) for k, v in o.items()}
else:
return o
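# Assuming the collapsed isinstance checks above dispatch on str, type,
# (list, tuple) and dict in that order, a mapping such as
# {SomeModel: [SomeModelTest]} serializes to {"SomeModel": ["SomeModelTest"]}.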
| 51
| 0
|
from __future__ import annotations
from typing import Any
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
pass
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : int , __a : Any ) -> None:
_UpperCamelCase : Any = data
_UpperCamelCase : Node | None = None
def __iter__( self : Union[str, Any] ) -> int:
_UpperCamelCase : Any = self
_UpperCamelCase : Tuple = []
while node:
if node in visited:
raise ContainsLoopError
visited.append(__a )
yield node.data
_UpperCamelCase : List[Any] = node.next_node
@property
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> bool:
try:
list(self )
return False
except ContainsLoopError:
return True
if __name__ == "__main__":
lowerCamelCase__ = Node(1)
lowerCamelCase__ = Node(2)
lowerCamelCase__ = Node(3)
lowerCamelCase__ = Node(4)
print(root_node.has_loop) # False
lowerCamelCase__ = root_node.next_node
print(root_node.has_loop) # True
lowerCamelCase__ = Node(5)
lowerCamelCase__ = Node(6)
lowerCamelCase__ = Node(5)
lowerCamelCase__ = Node(6)
print(root_node.has_loop) # False
lowerCamelCase__ = Node(1)
print(root_node.has_loop) # False
| 702
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCamelCase__ = {
"configuration_mask2former": [
"MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Mask2FormerConfig",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["Mask2FormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"Mask2FormerForUniversalSegmentation",
"Mask2FormerModel",
"Mask2FormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_maskaformer import MaskaFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskaformer import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskaFormerForUniversalSegmentation,
MaskaFormerModel,
MaskaFormerPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 51
| 0
|
"""simple docstring"""
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class __SCREAMING_SNAKE_CASE :
def __init__( self : int , __a : Optional[Any] , __a : str=13 , __a : int=7 , __a : List[Any]=True , __a : Optional[int]=True , __a : Any=False , __a : Optional[int]=True , __a : Optional[Any]=99 , __a : Dict=64 , __a : Union[str, Any]=5 , __a : Union[str, Any]=4 , __a : List[Any]=64 , __a : List[Any]="gelu" , __a : Tuple=0.1 , __a : Any=0.1 , __a : Dict=512 , __a : Optional[Any]=16 , __a : str=2 , __a : Union[str, Any]=0.02 , __a : List[Any]=3 , __a : List[Any]=4 , __a : Dict=None , ) -> int:
_UpperCamelCase : List[str] = parent
_UpperCamelCase : Tuple = batch_size
_UpperCamelCase : Optional[Any] = seq_length
_UpperCamelCase : Union[str, Any] = is_training
_UpperCamelCase : Dict = use_input_mask
_UpperCamelCase : List[str] = use_token_type_ids
_UpperCamelCase : Any = use_labels
_UpperCamelCase : Optional[int] = vocab_size
_UpperCamelCase : Tuple = hidden_size
_UpperCamelCase : Union[str, Any] = num_hidden_layers
_UpperCamelCase : List[Any] = num_attention_heads
_UpperCamelCase : Dict = intermediate_size
_UpperCamelCase : int = hidden_act
_UpperCamelCase : Optional[int] = hidden_dropout_prob
_UpperCamelCase : Dict = attention_probs_dropout_prob
_UpperCamelCase : Any = max_position_embeddings
_UpperCamelCase : Dict = type_vocab_size
_UpperCamelCase : str = type_sequence_label_size
_UpperCamelCase : Optional[Any] = initializer_range
_UpperCamelCase : List[Any] = num_labels
_UpperCamelCase : int = num_choices
_UpperCamelCase : List[Any] = scope
def __SCREAMING_SNAKE_CASE ( self : int ) -> str:
return MPNetConfig.from_pretrained("microsoft/mpnet-base" )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Any:
_UpperCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase : Tuple = None
if self.use_input_mask:
_UpperCamelCase : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCamelCase : Dict = None
_UpperCamelCase : Tuple = None
_UpperCamelCase : Optional[int] = None
if self.use_labels:
_UpperCamelCase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCamelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCamelCase : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
_UpperCamelCase : List[str] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> str:
return MPNetConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : Optional[Any] , __a : Optional[Any] , __a : Union[str, Any] , __a : Dict , __a : int , __a : List[Any] ) -> Optional[Any]:
_UpperCamelCase : str = MPNetModel(config=__a )
model.to(__a )
model.eval()
_UpperCamelCase : Optional[int] = model(__a , __a )
_UpperCamelCase : List[Any] = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : Optional[int] , __a : Union[str, Any] , __a : Any , __a : Optional[int] , __a : List[Any] , __a : str ) -> str:
_UpperCamelCase : Tuple = MPNetForQuestionAnswering(config=__a )
model.to(__a )
model.eval()
_UpperCamelCase : List[str] = model(
__a , attention_mask=__a , start_positions=__a , end_positions=__a , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : Dict , __a : int , __a : Optional[Any] , __a : Optional[Any] , __a : List[str] , __a : Optional[int] ) -> Dict:
_UpperCamelCase : str = self.num_labels
_UpperCamelCase : List[str] = MPNetForSequenceClassification(__a )
model.to(__a )
model.eval()
_UpperCamelCase : Any = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __SCREAMING_SNAKE_CASE ( self : int , __a : Tuple , __a : Any , __a : Dict , __a : Optional[int] , __a : str , __a : str ) -> List[str]:
_UpperCamelCase : int = self.num_choices
_UpperCamelCase : Any = MPNetForMultipleChoice(config=__a )
model.to(__a )
model.eval()
_UpperCamelCase : Any = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCamelCase : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCamelCase : Any = model(
__a , attention_mask=__a , labels=__a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __SCREAMING_SNAKE_CASE ( self : str , __a : List[Any] , __a : Union[str, Any] , __a : str , __a : Any , __a : List[str] , __a : List[str] ) -> Optional[int]:
_UpperCamelCase : Tuple = self.num_labels
_UpperCamelCase : List[Any] = MPNetForTokenClassification(config=__a )
model.to(__a )
model.eval()
_UpperCamelCase : Dict = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
_UpperCamelCase : str = self.prepare_config_and_inputs()
        _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : List[str] = config_and_inputs
_UpperCamelCase : Union[str, Any] = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
SCREAMING_SNAKE_CASE__ :Optional[Any] = (
(
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE__ :Dict = (
{
"feature-extraction": MPNetModel,
"fill-mask": MPNetForMaskedLM,
"question-answering": MPNetForQuestionAnswering,
"text-classification": MPNetForSequenceClassification,
"token-classification": MPNetForTokenClassification,
"zero-shot": MPNetForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ :int = False
SCREAMING_SNAKE_CASE__ :Union[str, Any] = True
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
_UpperCamelCase : int = MPNetModelTester(self )
_UpperCamelCase : str = ConfigTester(self , config_class=__a , hidden_size=37 )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Any:
self.config_tester.run_common_tests()
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
_UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_model(*__a )
def __SCREAMING_SNAKE_CASE ( self : int ) -> str:
_UpperCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_sequence_classification(*__a )
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
_UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_multiple_choice(*__a )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
_UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_token_classification(*__a )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]:
_UpperCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_question_answering(*__a )
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def __SCREAMING_SNAKE_CASE ( self : int ) -> Tuple:
_UpperCamelCase : Tuple = MPNetModel.from_pretrained("microsoft/mpnet-base" )
_UpperCamelCase : str = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
_UpperCamelCase : int = model(__a )[0]
_UpperCamelCase : Tuple = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , __a )
_UpperCamelCase : Optional[int] = torch.tensor(
[[[-0.05_50, 0.19_43, -0.07_40], [-0.05_62, 0.22_11, -0.05_79], [-0.04_37, 0.33_37, -0.06_41]]] )
# compare the actual values for a slice.
self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1e-4 ) )
| 703
|
"""simple docstring"""
lowerCamelCase__ = "\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
lowerCamelCase__ = [{"type": "code", "content": INSTALL_CONTENT}]
lowerCamelCase__ = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 51
| 0
|
"""simple docstring"""
import gc
import threading
import time
import psutil
import torch
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Optional[Any] ) -> Optional[int]:
_UpperCamelCase : List[str] = psutil.Process()
_UpperCamelCase : Tuple = False
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
_UpperCamelCase : Tuple = -1
while True:
_UpperCamelCase : int = max(self.process.memory_info().rss , self.cpu_memory_peak )
# can't sleep or will not catch the peak right (this comment is here on purpose)
if not self.peak_monitoring:
break
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
_UpperCamelCase : Any = True
_UpperCamelCase : Union[str, Any] = threading.Thread(target=self.peak_monitor )
_UpperCamelCase : Dict = True
self.thread.start()
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
_UpperCamelCase : str = False
self.thread.join()
return self.cpu_memory_peak
lowerCamelCase__ = PeakCPUMemory()
def lowercase__ ( ) -> Any:
"""simple docstring"""
_UpperCamelCase : List[str] = {"time": time.time()}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
_UpperCamelCase : Optional[int] = psutil.Process().memory_info().rss
cpu_peak_tracker.start()
# GPU mem
for i in range(torch.cuda.device_count() ):
_UpperCamelCase : Any = torch.cuda.memory_allocated(lowercase_ )
torch.cuda.reset_peak_memory_stats()
return measures
def lowercase__ ( lowercase_ ) -> int:
"""simple docstring"""
_UpperCamelCase : Union[str, Any] = {"time": time.time() - start_measures["time"]}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
_UpperCamelCase : List[Any] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
_UpperCamelCase : Tuple = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20
# GPU mem
for i in range(torch.cuda.device_count() ):
_UpperCamelCase : List[str] = (torch.cuda.memory_allocated(lowercase_ ) - start_measures[str(lowercase_ )]) / 2**20
_UpperCamelCase : List[str] = (torch.cuda.max_memory_allocated(lowercase_ ) - start_measures[str(lowercase_ )]) / 2**20
return measures
def lowercase__ ( lowercase_ ,lowercase_ ) -> int:
"""simple docstring"""
print(F'''{description}:''' )
print(F'''- Time: {measures['time']:.2f}s''' )
for i in range(torch.cuda.device_count() ):
print(F'''- GPU {i} allocated: {measures[str(lowercase_ )]:.2f}MiB''' )
_UpperCamelCase : Union[str, Any] = measures[F'''{i}-peak''']
print(F'''- GPU {i} peak: {peak:.2f}MiB''' )
print(F'''- CPU RAM allocated: {measures['cpu']:.2f}MiB''' )
print(F'''- CPU RAM peak: {measures['cpu-peak']:.2f}MiB''' )
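# A minimal usage sketch for the helpers above (hedged: it assumes the
# de-obfuscated names `start_measure`, `end_measure` and `log_measures`,
# which differ from the mangled definitions in this file):
#
#     start = start_measure()           # snapshot time, CPU RSS and per-GPU memory
#     run_workload()                    # hypothetical workload to profile
#     stats = end_measure(start)        # deltas in seconds and MiB, including peaks
#     log_measures(stats, "workload")   # pretty-print the measurements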
| 704
|
"""simple docstring"""
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
_UpperCamelCase : Tuple = tempfile.mkdtemp()
_UpperCamelCase : str = 5
        # REALM tokenizer vocabulary
_UpperCamelCase : Tuple = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"test",
"question",
"this",
"is",
"the",
"first",
"second",
"third",
"fourth",
"fifth",
"record",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
_UpperCamelCase : Optional[int] = os.path.join(self.tmpdirname , "realm_tokenizer" )
os.makedirs(__a , exist_ok=__a )
_UpperCamelCase : Optional[Any] = os.path.join(__a , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
_UpperCamelCase : Optional[int] = os.path.join(self.tmpdirname , "realm_block_records" )
os.makedirs(__a , exist_ok=__a )
def __SCREAMING_SNAKE_CASE ( self : str ) -> RealmTokenizer:
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , "realm_tokenizer" ) )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict:
shutil.rmtree(self.tmpdirname )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
_UpperCamelCase : Optional[Any] = RealmConfig(num_block_records=self.num_block_records )
return config
def __SCREAMING_SNAKE_CASE ( self : int ) -> int:
_UpperCamelCase : Any = Dataset.from_dict(
{
"id": ["0", "1"],
"question": ["foo", "bar"],
"answers": [["Foo", "Bar"], ["Bar"]],
} )
return dataset
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
_UpperCamelCase : int = np.array(
[
b"This is the first record",
b"This is the second record",
b"This is the third record",
b"This is the fourth record",
b"This is the fifth record",
b"This is a longer longer longer record",
] , dtype=__a , )
return block_records
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
_UpperCamelCase : List[str] = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
_UpperCamelCase : Tuple = self.get_config()
_UpperCamelCase : int = self.get_dummy_retriever()
_UpperCamelCase : Tuple = retriever.tokenizer
_UpperCamelCase : List[str] = np.array([0, 3] , dtype="long" )
_UpperCamelCase : Union[str, Any] = tokenizer(["Test question"] ).input_ids
_UpperCamelCase : List[str] = tokenizer(
["the fourth"] , add_special_tokens=__a , return_token_type_ids=__a , return_attention_mask=__a , ).input_ids
_UpperCamelCase : str = config.reader_seq_len
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : List[Any] = retriever(
__a , __a , answer_ids=__a , max_length=__a , return_tensors="np" )
self.assertEqual(len(__a ) , 2 )
self.assertEqual(len(__a ) , 2 )
self.assertEqual(len(__a ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"] , )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
_UpperCamelCase : Any = self.get_config()
_UpperCamelCase : Dict = self.get_dummy_retriever()
_UpperCamelCase : Dict = retriever.tokenizer
_UpperCamelCase : List[Any] = np.array([0, 3, 5] , dtype="long" )
_UpperCamelCase : Optional[int] = tokenizer(["Test question"] ).input_ids
_UpperCamelCase : str = tokenizer(
["the fourth", "longer longer"] , add_special_tokens=__a , return_token_type_ids=__a , return_attention_mask=__a , ).input_ids
_UpperCamelCase : Union[str, Any] = config.reader_seq_len
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Optional[Any] = retriever(
__a , __a , answer_ids=__a , max_length=__a , return_tensors="np" )
self.assertEqual([False, True, True] , __a )
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , __a )
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , __a )
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
_UpperCamelCase : List[Any] = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) )
# Test local path
_UpperCamelCase : int = retriever.from_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) )
self.assertEqual(retriever.block_records[0] , b"This is the first record" )
# Test mocked remote path
with patch("transformers.models.realm.retrieval_realm.hf_hub_download" ) as mock_hf_hub_download:
_UpperCamelCase : List[Any] = os.path.join(
os.path.join(self.tmpdirname , "realm_block_records" ) , _REALM_BLOCK_RECORDS_FILENAME )
_UpperCamelCase : int = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa" )
self.assertEqual(retriever.block_records[0] , b"This is the first record" )
| 51
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
lowerCamelCase__ = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
lowerCamelCase__ = TaTokenizerFast
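# MT5 reuses the T5 tokenizers. The try/except blocks below follow the usual
# lazy-import pattern: when an optional backend (torch/tf/flax) is missing,
# the corresponding model classes are simply left out of `_import_structure`,
# and the dummy tokenizer objects above raise an informative error at use time.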
lowerCamelCase__ = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"MT5EncoderModel",
"MT5ForConditionalGeneration",
"MT5ForQuestionAnswering",
"MT5Model",
"MT5PreTrainedModel",
"MT5Stack",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
lowerCamelCase__ = _LazyModule(
__name__,
globals()["__file__"],
_import_structure,
extra_objects={"MT5Tokenizer": MTaTokenizer, "MT5TokenizerFast": MTaTokenizerFast},
module_spec=__spec__,
)
| 705
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Any = LEDConfig
SCREAMING_SNAKE_CASE__ :str = {}
SCREAMING_SNAKE_CASE__ :List[str] = "gelu"
def __init__( self : List[Any] , __a : Union[str, Any] , __a : List[Any]=13 , __a : int=7 , __a : str=True , __a : Any=False , __a : str=99 , __a : str=32 , __a : Union[str, Any]=2 , __a : Optional[Any]=4 , __a : List[Any]=37 , __a : List[Any]=0.1 , __a : Tuple=0.1 , __a : Dict=20 , __a : str=2 , __a : Dict=1 , __a : Any=0 , __a : List[Any]=4 , ) -> List[Any]:
_UpperCamelCase : Optional[Any] = parent
_UpperCamelCase : List[str] = batch_size
_UpperCamelCase : str = seq_length
_UpperCamelCase : str = is_training
_UpperCamelCase : Any = use_labels
_UpperCamelCase : Any = vocab_size
_UpperCamelCase : List[str] = hidden_size
_UpperCamelCase : Optional[Any] = num_hidden_layers
_UpperCamelCase : Dict = num_attention_heads
_UpperCamelCase : Optional[Any] = intermediate_size
_UpperCamelCase : int = hidden_dropout_prob
_UpperCamelCase : Dict = attention_probs_dropout_prob
_UpperCamelCase : str = max_position_embeddings
_UpperCamelCase : int = eos_token_id
_UpperCamelCase : Dict = pad_token_id
_UpperCamelCase : Optional[Any] = bos_token_id
_UpperCamelCase : str = attention_window
        # `ModelTesterMixin.test_attention_outputs` expects attention tensors of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` tokens plus one before and one after
_UpperCamelCase : List[str] = self.attention_window + 2
        # because of padding, `encoder_seq_length` is different from `seq_length`. This is relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
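        # e.g. with the defaults above (seq_length=7, attention_window=4):
        # encoder_seq_length = 7 + (4 - 7 % 4) % 4 = 8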
_UpperCamelCase : int = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def __SCREAMING_SNAKE_CASE ( self : int ) -> str:
_UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_UpperCamelCase : Optional[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_UpperCamelCase : Tuple = tf.concat([input_ids, eos_tensor] , axis=1 )
_UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase : List[Any] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
_UpperCamelCase : Dict = prepare_led_inputs_dict(__a , __a , __a )
_UpperCamelCase : Union[str, Any] = tf.concat(
[tf.zeros_like(__a )[:, :-1], tf.ones_like(__a )[:, -1:]] , axis=-1 , )
_UpperCamelCase : Union[str, Any] = global_attention_mask
return config, inputs_dict
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : List[Any] , __a : int ) -> Tuple:
_UpperCamelCase : Tuple = TFLEDModel(config=__a ).get_decoder()
_UpperCamelCase : Tuple = inputs_dict["input_ids"]
_UpperCamelCase : int = input_ids[:1, :]
_UpperCamelCase : List[str] = inputs_dict["attention_mask"][:1, :]
_UpperCamelCase : List[Any] = 1
# first forward pass
_UpperCamelCase : Any = model(__a , attention_mask=__a , use_cache=__a )
_UpperCamelCase, _UpperCamelCase : Union[str, Any] = outputs.to_tuple()
        # create hypothetical next tokens and extend next_input_ids
_UpperCamelCase : Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
_UpperCamelCase : Any = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append the new tokens to input_ids and attention_mask
_UpperCamelCase : List[str] = tf.concat([input_ids, next_tokens] , axis=-1 )
_UpperCamelCase : Union[str, Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_UpperCamelCase : Tuple = model(__a , attention_mask=__a )[0]
_UpperCamelCase : int = model(__a , attention_mask=__a , past_key_values=__a )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_UpperCamelCase : List[Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_UpperCamelCase : List[str] = output_from_no_past[:, -3:, random_slice_idx]
_UpperCamelCase : Optional[int] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__a , __a , rtol=1e-3 )
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_=None ,lowercase_=None ,lowercase_=None ,lowercase_=None ,) -> Dict:
"""simple docstring"""
if attention_mask is None:
_UpperCamelCase : str = tf.cast(tf.math.not_equal(lowercase_ ,config.pad_token_id ) ,tf.inta )
if decoder_attention_mask is None:
_UpperCamelCase : str = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape ,dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] ,config.pad_token_id ) ,tf.inta ),
] ,axis=-1 ,)
if head_mask is None:
_UpperCamelCase : List[str] = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_UpperCamelCase : List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Any = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
SCREAMING_SNAKE_CASE__ :List[str] = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
SCREAMING_SNAKE_CASE__ :List[str] = (
{
"conversational": TFLEDForConditionalGeneration,
"feature-extraction": TFLEDModel,
"summarization": TFLEDForConditionalGeneration,
"text2text-generation": TFLEDForConditionalGeneration,
"translation": TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE__ :Tuple = True
SCREAMING_SNAKE_CASE__ :str = False
SCREAMING_SNAKE_CASE__ :Optional[Any] = False
SCREAMING_SNAKE_CASE__ :int = False
def __SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
_UpperCamelCase : int = TFLEDModelTester(self )
_UpperCamelCase : Any = ConfigTester(self , config_class=__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
self.config_tester.run_common_tests()
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
_UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__a )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
_UpperCamelCase, _UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase : Optional[int] = tf.zeros_like(inputs_dict["attention_mask"] )
_UpperCamelCase : Union[str, Any] = 2
_UpperCamelCase : str = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["global_attention_mask"] , )
_UpperCamelCase : Dict = True
_UpperCamelCase : str = self.model_tester.seq_length
_UpperCamelCase : Union[str, Any] = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(__a : Optional[int] ):
_UpperCamelCase : Optional[int] = outputs.decoder_attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(__a : Optional[Any] ):
_UpperCamelCase : Union[str, Any] = [t.numpy() for t in outputs.encoder_attentions]
_UpperCamelCase : List[Any] = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
_UpperCamelCase : Dict = True
_UpperCamelCase : Optional[Any] = False
_UpperCamelCase : int = False
_UpperCamelCase : Optional[int] = model_class(__a )
_UpperCamelCase : int = model(self._prepare_for_class(__a , __a ) )
_UpperCamelCase : Any = len(__a )
self.assertEqual(config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
if self.is_encoder_decoder:
_UpperCamelCase : Optional[Any] = model_class(__a )
_UpperCamelCase : List[Any] = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(config.output_hidden_states , __a )
check_decoder_attentions_output(__a )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
_UpperCamelCase : int = True
_UpperCamelCase : Tuple = model_class(__a )
_UpperCamelCase : str = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
# Check attention is always last and order is fine
_UpperCamelCase : Any = True
_UpperCamelCase : List[str] = True
_UpperCamelCase : Tuple = model_class(__a )
_UpperCamelCase : int = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__a ) )
self.assertEqual(model.config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
@unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing." )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
pass
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple:
        # TODO: head masking is not yet implemented
pass
def lowercase__ ( lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
return tf.constant(lowercase_ ,dtype=tf.intaa )
lowerCamelCase__ = 1E-4
@slow
@require_tf
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
_UpperCamelCase : Any = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" ).led
# change to intended input here
_UpperCamelCase : int = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_UpperCamelCase : Tuple = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_UpperCamelCase : Optional[Any] = prepare_led_inputs_dict(model.config , __a , __a )
_UpperCamelCase : Optional[int] = model(**__a )[0]
_UpperCamelCase : Optional[int] = (1, 1024, 768)
self.assertEqual(output.shape , __a )
# change to expected output here
_UpperCamelCase : Tuple = tf.convert_to_tensor(
[[2.30_50, 2.82_79, 0.65_31], [-1.84_57, -0.14_55, -3.56_61], [-1.01_86, 0.45_86, -2.20_43]] , )
tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1e-3 )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
_UpperCamelCase : Optional[int] = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" )
# change to intended input here
_UpperCamelCase : Optional[int] = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_UpperCamelCase : List[str] = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_UpperCamelCase : Optional[Any] = prepare_led_inputs_dict(model.config , __a , __a )
_UpperCamelCase : Union[str, Any] = model(**__a )[0]
_UpperCamelCase : int = (1, 1024, model.config.vocab_size)
self.assertEqual(output.shape , __a )
# change to expected output here
_UpperCamelCase : Optional[int] = tf.convert_to_tensor(
[[33.65_07, 6.45_72, 16.80_89], [5.87_39, -2.42_38, 11.29_02], [-3.21_39, -4.31_49, 4.27_83]] , )
tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1e-3 , rtol=1e-3 )
| 51
| 0
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :str = ConsistencyModelPipeline
SCREAMING_SNAKE_CASE__ :Optional[Any] = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
SCREAMING_SNAKE_CASE__ :Dict = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
# Override required_optional_params to remove num_images_per_prompt
SCREAMING_SNAKE_CASE__ :List[str] = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"output_type",
"return_dict",
"callback",
"callback_steps",
] )
@property
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[int]:
_UpperCamelCase : str = UNetaDModel.from_pretrained(
"diffusers/consistency-models-test" , subfolder="test_unet" , )
return unet
@property
def __SCREAMING_SNAKE_CASE ( self : int ) -> Any:
_UpperCamelCase : Dict = UNetaDModel.from_pretrained(
"diffusers/consistency-models-test" , subfolder="test_unet_class_cond" , )
return unet
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : Optional[Any]=False ) -> Tuple:
if class_cond:
_UpperCamelCase : Union[str, Any] = self.dummy_cond_unet
else:
_UpperCamelCase : Any = self.dummy_uncond_unet
# Default to CM multistep sampler
_UpperCamelCase : List[Any] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , )
_UpperCamelCase : Optional[int] = {
"unet": unet,
"scheduler": scheduler,
}
return components
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : Any , __a : Any=0 ) -> Optional[int]:
if str(__a ).startswith("mps" ):
_UpperCamelCase : Optional[int] = torch.manual_seed(__a )
else:
_UpperCamelCase : List[str] = torch.Generator(device=__a ).manual_seed(__a )
_UpperCamelCase : Any = {
"batch_size": 1,
"num_inference_steps": None,
"timesteps": [22, 0],
"generator": generator,
"output_type": "np",
}
return inputs
def __SCREAMING_SNAKE_CASE ( self : str ) -> int:
_UpperCamelCase : Union[str, Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase : Tuple = self.get_dummy_components()
_UpperCamelCase : Optional[int] = ConsistencyModelPipeline(**__a )
_UpperCamelCase : int = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
_UpperCamelCase : Dict = self.get_dummy_inputs(__a )
_UpperCamelCase : Optional[Any] = pipe(**__a ).images
assert image.shape == (1, 32, 32, 3)
_UpperCamelCase : Optional[int] = image[0, -3:, -3:, -1]
_UpperCamelCase : List[str] = np.array([0.35_72, 0.62_73, 0.40_31, 0.39_61, 0.43_21, 0.57_30, 0.52_66, 0.47_80, 0.50_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]:
_UpperCamelCase : Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase : Union[str, Any] = self.get_dummy_components(class_cond=__a )
_UpperCamelCase : List[Any] = ConsistencyModelPipeline(**__a )
_UpperCamelCase : Tuple = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
_UpperCamelCase : Tuple = self.get_dummy_inputs(__a )
_UpperCamelCase : Tuple = 0
_UpperCamelCase : List[str] = pipe(**__a ).images
assert image.shape == (1, 32, 32, 3)
_UpperCamelCase : Union[str, Any] = image[0, -3:, -3:, -1]
_UpperCamelCase : Optional[int] = np.array([0.35_72, 0.62_73, 0.40_31, 0.39_61, 0.43_21, 0.57_30, 0.52_66, 0.47_80, 0.50_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
_UpperCamelCase : Tuple = "cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase : Any = self.get_dummy_components()
_UpperCamelCase : List[Any] = ConsistencyModelPipeline(**__a )
_UpperCamelCase : Optional[Any] = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
_UpperCamelCase : Union[str, Any] = self.get_dummy_inputs(__a )
_UpperCamelCase : Union[str, Any] = 1
_UpperCamelCase : Union[str, Any] = None
_UpperCamelCase : Optional[int] = pipe(**__a ).images
assert image.shape == (1, 32, 32, 3)
_UpperCamelCase : str = image[0, -3:, -3:, -1]
_UpperCamelCase : Optional[Any] = np.array([0.50_04, 0.50_04, 0.49_94, 0.50_08, 0.49_76, 0.50_18, 0.49_90, 0.49_82, 0.49_87] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
_UpperCamelCase : Tuple = "cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase : Optional[Any] = self.get_dummy_components(class_cond=__a )
_UpperCamelCase : List[str] = ConsistencyModelPipeline(**__a )
_UpperCamelCase : List[str] = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
_UpperCamelCase : Union[str, Any] = self.get_dummy_inputs(__a )
_UpperCamelCase : List[Any] = 1
_UpperCamelCase : List[str] = None
_UpperCamelCase : int = 0
_UpperCamelCase : int = pipe(**__a ).images
assert image.shape == (1, 32, 32, 3)
_UpperCamelCase : Tuple = image[0, -3:, -3:, -1]
_UpperCamelCase : Any = np.array([0.50_04, 0.50_04, 0.49_94, 0.50_08, 0.49_76, 0.50_18, 0.49_90, 0.49_82, 0.49_87] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : List[str]=0 , __a : Optional[Any]=False , __a : Optional[Any]="cpu" , __a : int=torch.floataa , __a : Union[str, Any]=(1, 3, 64, 64) ) -> Tuple:
_UpperCamelCase : Optional[int] = torch.manual_seed(__a )
_UpperCamelCase : Optional[Any] = {
"num_inference_steps": None,
"timesteps": [22, 0],
"class_labels": 0,
"generator": generator,
"output_type": "np",
}
if get_fixed_latents:
_UpperCamelCase : List[str] = self.get_fixed_latents(seed=__a , device=__a , dtype=__a , shape=__a )
_UpperCamelCase : List[Any] = latents
return inputs
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : str=0 , __a : int="cpu" , __a : List[str]=torch.floataa , __a : Tuple=(1, 3, 64, 64) ) -> Dict:
if type(__a ) == str:
_UpperCamelCase : int = torch.device(__a )
_UpperCamelCase : Optional[int] = torch.Generator(device=__a ).manual_seed(__a )
_UpperCamelCase : List[Any] = randn_tensor(__a , generator=__a , device=__a , dtype=__a )
return latents
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
_UpperCamelCase : Optional[Any] = UNetaDModel.from_pretrained("diffusers/consistency_models" , subfolder="diffusers_cd_imagenet64_l2" )
_UpperCamelCase : List[Any] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , )
_UpperCamelCase : List[Any] = ConsistencyModelPipeline(unet=__a , scheduler=__a )
pipe.to(torch_device=__a )
pipe.set_progress_bar_config(disable=__a )
_UpperCamelCase : Any = self.get_inputs()
_UpperCamelCase : List[str] = pipe(**__a ).images
assert image.shape == (1, 64, 64, 3)
_UpperCamelCase : int = image[0, -3:, -3:, -1]
_UpperCamelCase : Tuple = np.array([0.08_88, 0.08_81, 0.06_66, 0.04_79, 0.02_92, 0.01_95, 0.02_01, 0.01_63, 0.02_54] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
_UpperCamelCase : Optional[Any] = UNetaDModel.from_pretrained("diffusers/consistency_models" , subfolder="diffusers_cd_imagenet64_l2" )
_UpperCamelCase : Union[str, Any] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , )
_UpperCamelCase : List[Any] = ConsistencyModelPipeline(unet=__a , scheduler=__a )
pipe.to(torch_device=__a )
pipe.set_progress_bar_config(disable=__a )
_UpperCamelCase : List[str] = self.get_inputs()
_UpperCamelCase : List[str] = 1
_UpperCamelCase : Optional[Any] = None
_UpperCamelCase : Optional[int] = pipe(**__a ).images
assert image.shape == (1, 64, 64, 3)
_UpperCamelCase : Optional[Any] = image[0, -3:, -3:, -1]
_UpperCamelCase : int = np.array([0.03_40, 0.01_52, 0.00_63, 0.02_67, 0.02_21, 0.01_07, 0.04_16, 0.01_86, 0.02_17] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
@require_torch_a
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
_UpperCamelCase : List[str] = UNetaDModel.from_pretrained("diffusers/consistency_models" , subfolder="diffusers_cd_imagenet64_l2" )
_UpperCamelCase : Union[str, Any] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , )
_UpperCamelCase : Any = ConsistencyModelPipeline(unet=__a , scheduler=__a )
pipe.to(torch_device=__a , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=__a )
_UpperCamelCase : Optional[int] = self.get_inputs(get_fixed_latents=__a , device=__a )
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=__a , enable_math=__a , enable_mem_efficient=__a ):
_UpperCamelCase : Union[str, Any] = pipe(**__a ).images
assert image.shape == (1, 64, 64, 3)
_UpperCamelCase : Any = image[0, -3:, -3:, -1]
_UpperCamelCase : Tuple = np.array([0.18_75, 0.14_28, 0.12_89, 0.21_51, 0.20_92, 0.14_77, 0.18_77, 0.16_41, 0.13_53] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@require_torch_a
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> str:
_UpperCamelCase : List[Any] = UNetaDModel.from_pretrained("diffusers/consistency_models" , subfolder="diffusers_cd_imagenet64_l2" )
_UpperCamelCase : Optional[Any] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , )
_UpperCamelCase : Optional[Any] = ConsistencyModelPipeline(unet=__a , scheduler=__a )
pipe.to(torch_device=__a , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=__a )
_UpperCamelCase : int = self.get_inputs(get_fixed_latents=__a , device=__a )
_UpperCamelCase : Any = 1
_UpperCamelCase : Any = None
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=__a , enable_math=__a , enable_mem_efficient=__a ):
_UpperCamelCase : int = pipe(**__a ).images
assert image.shape == (1, 64, 64, 3)
_UpperCamelCase : Union[str, Any] = image[0, -3:, -3:, -1]
_UpperCamelCase : Any = np.array([0.16_63, 0.19_48, 0.22_75, 0.16_80, 0.12_04, 0.12_45, 0.18_58, 0.13_38, 0.20_95] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
| 706
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Union[str, Any] = RoCBertTokenizer
SCREAMING_SNAKE_CASE__ :Dict = None
SCREAMING_SNAKE_CASE__ :List[Any] = False
SCREAMING_SNAKE_CASE__ :Union[str, Any] = True
SCREAMING_SNAKE_CASE__ :Union[str, Any] = filter_non_english
def __SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
super().setUp()
_UpperCamelCase : Any = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
_UpperCamelCase : List[str] = {}
_UpperCamelCase : Tuple = {}
for i, value in enumerate(__a ):
_UpperCamelCase : List[str] = i
_UpperCamelCase : Optional[Any] = i
_UpperCamelCase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_UpperCamelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_shape_file"] )
_UpperCamelCase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_pronunciation_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
with open(self.word_shape_file , "w" , encoding="utf-8" ) as word_shape_writer:
json.dump(__a , __a , ensure_ascii=__a )
with open(self.word_pronunciation_file , "w" , encoding="utf-8" ) as word_pronunciation_writer:
json.dump(__a , __a , ensure_ascii=__a )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
_UpperCamelCase : Tuple = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
_UpperCamelCase : int = tokenizer.tokenize("你好[SEP]你是谁" )
self.assertListEqual(__a , ["你", "好", "[SEP]", "你", "是", "谁"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(__a ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(__a ) , [5, 6, 2, 5, 7, 8] )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
_UpperCamelCase : Dict = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
_UpperCamelCase : List[Any] = RoCBertBasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
_UpperCamelCase : Optional[Any] = RoCBertBasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
_UpperCamelCase : Dict = RoCBertBasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
_UpperCamelCase : List[str] = RoCBertBasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
_UpperCamelCase : Tuple = RoCBertBasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
_UpperCamelCase : Union[str, Any] = RoCBertBasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
_UpperCamelCase : Tuple = RoCBertBasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
_UpperCamelCase : int = RoCBertBasicTokenizer(do_lower_case=__a , never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
_UpperCamelCase : Optional[int] = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
_UpperCamelCase : Any = {}
for i, token in enumerate(__a ):
_UpperCamelCase : str = i
_UpperCamelCase : Optional[int] = RoCBertWordpieceTokenizer(vocab=__a , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
_UpperCamelCase : Optional[Any] = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(__a ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
if self.test_rust_tokenizer:
_UpperCamelCase : Tuple = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(__a ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_UpperCamelCase : int = self.rust_tokenizer_class.from_pretrained(__a , **__a )
_UpperCamelCase : Union[str, Any] = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
_UpperCamelCase : Optional[Any] = tokenizer_r.encode_plus(
__a , return_attention_mask=__a , return_token_type_ids=__a , return_offsets_mapping=__a , add_special_tokens=__a , )
_UpperCamelCase : List[Any] = tokenizer_r.do_lower_case if hasattr(__a , "do_lower_case" ) else False
_UpperCamelCase : Dict = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
_UpperCamelCase : Optional[Any] = ["的", "人", "有"]
_UpperCamelCase : int = "".join(__a )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_UpperCamelCase : int = True
_UpperCamelCase : Any = self.tokenizer_class.from_pretrained(__a , **__a )
_UpperCamelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(__a , **__a )
_UpperCamelCase : int = tokenizer_p.encode(__a , add_special_tokens=__a )
_UpperCamelCase : int = tokenizer_r.encode(__a , add_special_tokens=__a )
_UpperCamelCase : List[Any] = tokenizer_r.convert_ids_to_tokens(__a )
_UpperCamelCase : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(__a )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(__a , __a )
self.assertListEqual(__a , __a )
_UpperCamelCase : Any = False
_UpperCamelCase : Dict = self.rust_tokenizer_class.from_pretrained(__a , **__a )
_UpperCamelCase : Any = self.tokenizer_class.from_pretrained(__a , **__a )
_UpperCamelCase : Any = tokenizer_r.encode(__a , add_special_tokens=__a )
_UpperCamelCase : Any = tokenizer_p.encode(__a , add_special_tokens=__a )
_UpperCamelCase : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(__a )
_UpperCamelCase : Dict = tokenizer_p.convert_ids_to_tokens(__a )
# it is expected that only the first Chinese character is not preceded by "##".
_UpperCamelCase : Any = [
F'''##{token}''' if idx != 0 else token for idx, token in enumerate(__a )
]
self.assertListEqual(__a , __a )
self.assertListEqual(__a , __a )
@slow
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
_UpperCamelCase : Dict = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
_UpperCamelCase : Optional[int] = tokenizer.encode("你好" , add_special_tokens=__a )
_UpperCamelCase : Dict = tokenizer.encode("你是谁" , add_special_tokens=__a )
_UpperCamelCase : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(__a )
_UpperCamelCase : Tuple = tokenizer.build_inputs_with_special_tokens(__a , __a )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
_UpperCamelCase : Optional[Any] = self.get_tokenizers(do_lower_case=__a )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_UpperCamelCase : int = "你好,你是谁"
_UpperCamelCase : Any = tokenizer.tokenize(__a )
_UpperCamelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(__a )
_UpperCamelCase : List[str] = tokenizer.convert_tokens_to_shape_ids(__a )
_UpperCamelCase : Any = tokenizer.convert_tokens_to_pronunciation_ids(__a )
_UpperCamelCase : Optional[int] = tokenizer.prepare_for_model(
__a , __a , __a , add_special_tokens=__a )
_UpperCamelCase : Tuple = tokenizer.encode_plus(__a , add_special_tokens=__a )
self.assertEqual(__a , __a )
| 51
| 0
|
"""simple docstring"""
def lowercase__ ( lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase : str = 0
_UpperCamelCase : List[str] = len(lowercase_ )
for i in range(n - 1 ):
for j in range(i + 1 ,lowercase_ ):
if arr[i] > arr[j]:
num_inversions += 1
return num_inversions
def lowercase__ ( lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
if len(lowercase_ ) <= 1:
return arr, 0
_UpperCamelCase : Union[str, Any] = len(lowercase_ ) // 2
_UpperCamelCase : Union[str, Any] = arr[0:mid]
_UpperCamelCase : Optional[int] = arr[mid:]
_UpperCamelCase : Dict = count_inversions_recursive(lowercase_ )
_UpperCamelCase : Optional[int] = count_inversions_recursive(lowercase_ )
_UpperCamelCase : str = _count_cross_inversions(lowercase_ ,lowercase_ )
    _UpperCamelCase : Union[str, Any] = inversions_p + inversions_q + cross_inversions
return c, num_inversions
def lowercase__ ( lowercase_ ,lowercase_ ) -> Dict:
"""simple docstring"""
_UpperCamelCase : Tuple = []
_UpperCamelCase : str = 0
while i < len(lowercase_ ) and j < len(lowercase_ ):
if p[i] > q[j]:
            # if p[i] > q[j], then p[k] > q[j] for all i <= k < len(p).
            # All of those pairs are inversions; the claim follows from
            # the fact that p is sorted.
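            # Illustration: p = [2, 5], q = [1, 3]. Seeing p[0]=2 > q[0]=1 counts
            # len(p) - 0 = 2 inversions at once ((2, 1) and (5, 1)); later
            # p[1]=5 > q[1]=3 adds one more, for 3 cross inversions in total.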
num_inversion += len(lowercase_ ) - i
r.append(q[j] )
j += 1
else:
r.append(p[i] )
i += 1
if i < len(lowercase_ ):
r.extend(p[i:] )
else:
r.extend(q[j:] )
return r, num_inversion
def lowercase__ ( ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase : Tuple = [10, 2, 1, 5, 5, 2, 11]
# this arr has 8 inversions:
# (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
_UpperCamelCase : Tuple = count_inversions_bf(lowercase_ )
_UpperCamelCase : Dict = count_inversions_recursive(lowercase_ )
assert num_inversions_bf == num_inversions_recursive == 8
print("number of inversions = " ,lowercase_ )
    # testing an array with zero inversions (arr_a after sorting)
arr_a.sort()
_UpperCamelCase : Union[str, Any] = count_inversions_bf(lowercase_ )
_UpperCamelCase : Tuple = count_inversions_recursive(lowercase_ )
assert num_inversions_bf == num_inversions_recursive == 0
print("number of inversions = " ,lowercase_ )
# an empty list should also have zero inversions
_UpperCamelCase : str = []
_UpperCamelCase : Dict = count_inversions_bf(lowercase_ )
_UpperCamelCase : List[Any] = count_inversions_recursive(lowercase_ )
assert num_inversions_bf == num_inversions_recursive == 0
print("number of inversions = " ,lowercase_ )
if __name__ == "__main__":
main()
| 707
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Tuple = "yolos"
def __init__( self : Dict , __a : Optional[Any]=768 , __a : List[Any]=12 , __a : Any=12 , __a : List[Any]=3072 , __a : Optional[int]="gelu" , __a : Dict=0.0 , __a : Optional[Any]=0.0 , __a : Any=0.02 , __a : Optional[int]=1e-1_2 , __a : List[Any]=[512, 864] , __a : List[str]=16 , __a : str=3 , __a : Optional[Any]=True , __a : Optional[Any]=100 , __a : List[str]=True , __a : Any=False , __a : List[str]=1 , __a : str=5 , __a : Optional[Any]=2 , __a : Tuple=5 , __a : Any=2 , __a : Union[str, Any]=0.1 , **__a : List[str] , ) -> List[str]:
super().__init__(**__a )
_UpperCamelCase : Dict = hidden_size
_UpperCamelCase : Any = num_hidden_layers
_UpperCamelCase : str = num_attention_heads
_UpperCamelCase : Dict = intermediate_size
_UpperCamelCase : List[str] = hidden_act
_UpperCamelCase : List[str] = hidden_dropout_prob
_UpperCamelCase : str = attention_probs_dropout_prob
_UpperCamelCase : Tuple = initializer_range
_UpperCamelCase : List[str] = layer_norm_eps
_UpperCamelCase : Tuple = image_size
_UpperCamelCase : Tuple = patch_size
_UpperCamelCase : Dict = num_channels
_UpperCamelCase : Any = qkv_bias
_UpperCamelCase : str = num_detection_tokens
_UpperCamelCase : str = use_mid_position_embeddings
_UpperCamelCase : List[str] = auxiliary_loss
# Hungarian matcher
_UpperCamelCase : List[Any] = class_cost
_UpperCamelCase : int = bbox_cost
_UpperCamelCase : Optional[int] = giou_cost
# Loss coefficients
_UpperCamelCase : List[Any] = bbox_loss_coefficient
_UpperCamelCase : str = giou_loss_coefficient
_UpperCamelCase : Dict = eos_coefficient
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :List[str] = version.parse("1.11" )
@property
def __SCREAMING_SNAKE_CASE ( self : str ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> float:
return 1e-4
@property
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
return 12
| 51
| 0
|
"""simple docstring"""
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
lowerCamelCase__ = parse(importlib.metadata.version("torch"))
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> str:
"""simple docstring"""
if operation not in STR_OPERATION_TO_FUNC.keys():
raise ValueError(F'''`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}''' )
_UpperCamelCase : Optional[Any] = STR_OPERATION_TO_FUNC[operation]
if isinstance(lowercase_ ,lowercase_ ):
_UpperCamelCase : Tuple = parse(importlib.metadata.version(lowercase_ ) )
return operation(lowercase_ ,parse(lowercase_ ) )
def lowercase__ ( lowercase_ ,lowercase_ ) -> Tuple:
"""simple docstring"""
return compare_versions(lowercase_ ,lowercase_ ,lowercase_ )
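# Usage sketch (hedged: assumes the de-obfuscated names `compare_versions` and
# `is_torch_version`, matching the call above):
#
#     compare_versions("torch", ">=", "1.12")   # parses the installed version of a package by name
#     is_torch_version(">=", "1.12")            # same comparison against the cached torch version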
| 708
|
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
lowerCamelCase__ = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
lowerCamelCase__ = [ord(letter) for letter in string.ascii_lowercase]
lowerCamelCase__ = {ord(char) for char in VALID_CHARS}
lowerCamelCase__ = ["the", "be", "to", "of", "and", "in", "that", "have"]
def lowercase__ ( lowercase_ ,lowercase_ ) -> str | None:
"""simple docstring"""
_UpperCamelCase : str = ""
_UpperCamelCase : int
_UpperCamelCase : int
_UpperCamelCase : int
for keychar, cipherchar in zip(cycle(lowercase_ ) ,lowercase_ ):
_UpperCamelCase : Dict = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(lowercase_ )
return decoded
def lowercase__ ( lowercase_ ) -> list[str]:
"""simple docstring"""
_UpperCamelCase : list[str] = []
for key in product(lowercase_ ,repeat=3 ):
_UpperCamelCase : int = try_key(lowercase_ ,lowercase_ )
if encoded is not None:
possibles.append(lowercase_ )
return possibles
def lowercase__ ( lowercase_ ,lowercase_ ) -> list[str]:
"""simple docstring"""
return [possible for possible in possibles if common_word in possible.lower()]
def lowercase__ ( lowercase_ = "p059_cipher.txt" ) -> int:
"""simple docstring"""
_UpperCamelCase : list[int]
_UpperCamelCase : list[str]
_UpperCamelCase : str
_UpperCamelCase : str
_UpperCamelCase : str = Path(lowercase_ ).parent.joinpath(lowercase_ ).read_text(encoding="utf-8" )
_UpperCamelCase : Optional[Any] = [int(lowercase_ ) for number in data.strip().split("," )]
_UpperCamelCase : List[str] = filter_valid_chars(lowercase_ )
for common_word in COMMON_WORDS:
_UpperCamelCase : Union[str, Any] = filter_common_word(lowercase_ ,lowercase_ )
if len(lowercase_ ) == 1:
break
_UpperCamelCase : Union[str, Any] = possibles[0]
return sum(ord(lowercase_ ) for char in decoded_text )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 51
| 0
|
from __future__ import annotations
import math
lowerCamelCase__ = "2020.9.26"
lowerCamelCase__ = "xcodz-dot, cclaus, dhruvmanila"
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> tuple[float, float]:
"""simple docstring"""
if not all(isinstance(lowercase_ ,(float, int) ) for val in locals().values() ):
_UpperCamelCase : Any = F'''Input values must either be float or int: {list(locals().values() )}'''
raise TypeError(lowercase_ )
_UpperCamelCase : Tuple = ((x * distance) / (z + distance)) * scale
_UpperCamelCase : Union[str, Any] = ((y * distance) / (z + distance)) * scale
return projected_x, projected_y
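# Worked example for the projection above: with (x, y, z, scale, distance) =
# (1, 2, 3, 10, 10), projected_x = ((1 * 10) / (3 + 10)) * 10 ≈ 7.6923 and
# projected_y = ((2 * 10) / 13) * 10 ≈ 15.3846, matching the demo under __main__.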
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> tuple[float, float, float]:
"""simple docstring"""
if not isinstance(lowercase_ ,lowercase_ ):
raise TypeError("Axis must be a str" )
_UpperCamelCase : List[str] = locals()
del input_variables["axis"]
if not all(isinstance(lowercase_ ,(float, int) ) for val in input_variables.values() ):
_UpperCamelCase : Any = (
"Input values except axis must either be float or int: "
F'''{list(input_variables.values() )}'''
)
raise TypeError(lowercase_ )
_UpperCamelCase : List[str] = (angle % 360) / 450 * 180 / math.pi
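    # Note: this conversion is kept from the original algorithm; it is not the
    # standard degrees-to-radians formula (which would be angle * math.pi / 180).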
if axis == "z":
_UpperCamelCase : Tuple = x * math.cos(lowercase_ ) - y * math.sin(lowercase_ )
_UpperCamelCase : str = y * math.cos(lowercase_ ) + x * math.sin(lowercase_ )
_UpperCamelCase : Union[str, Any] = z
elif axis == "x":
_UpperCamelCase : Any = y * math.cos(lowercase_ ) - z * math.sin(lowercase_ )
_UpperCamelCase : List[Any] = z * math.cos(lowercase_ ) + y * math.sin(lowercase_ )
_UpperCamelCase : Optional[Any] = x
elif axis == "y":
_UpperCamelCase : Any = x * math.cos(lowercase_ ) - z * math.sin(lowercase_ )
_UpperCamelCase : Dict = z * math.cos(lowercase_ ) + x * math.sin(lowercase_ )
_UpperCamelCase : List[Any] = y
else:
raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'" )
return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }""")
print(f"""{rotate(1.0, 2.0, 3.0, "y", 90.0) = }""")
| 709
|
"""simple docstring"""
def lowercase__ ( lowercase_ ,lowercase_ ) -> None:
"""simple docstring"""
_UpperCamelCase : List[Any] = len(lowercase_ )
print("The following activities are selected:" )
# The first activity is always selected
_UpperCamelCase : List[Any] = 0
print(lowercase_ ,end="," )
    # Consider the rest of the activities
for j in range(lowercase_ ):
        # If this activity starts at or after the finish time
        # of the previously selected activity, select it
if start[j] >= finish[i]:
print(lowercase_ ,end="," )
_UpperCamelCase : Optional[Any] = j
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase__ = [1, 3, 0, 5, 8, 5]
lowerCamelCase__ = [2, 4, 6, 7, 9, 9]
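    # With these inputs the greedy selection prints: 0,1,3,4,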
print_max_activities(start, finish)
| 51
| 0
|
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
def lowercase__ ( lowercase_ ) -> str:
"""simple docstring"""
_UpperCamelCase : int = torch.load(lowercase_ ,map_location="cpu" )
if "model" in sd.keys():
_UpperCamelCase : Union[str, Any] = torch.load(lowercase_ ,map_location="cpu" )["model"]
# pop unnecessary weights
_UpperCamelCase : List[Any] = [
"decoder.version",
"decoder.output_projection.weight",
]
for key in keys_to_delete:
if key in sd:
sd.pop(lowercase_ )
_UpperCamelCase : Optional[Any] = {
"decoder.project_in_dim.weight": "decoder.project_in.weight",
"decoder.project_out_dim.weight": "decoder.project_out.weight",
"decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
"decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
_UpperCamelCase : str = sd.pop(lowercase_ )
_UpperCamelCase : Dict = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
_UpperCamelCase : List[str] = sd[key]
            # We split the fused QKV weight into separate Q, K, V projections
_UpperCamelCase : Tuple = key.replace(".qkv_proj." ,".q_proj." )
_UpperCamelCase : Any = key.replace(".qkv_proj." ,".k_proj." )
_UpperCamelCase : Tuple = key.replace(".qkv_proj." ,".v_proj." )
_UpperCamelCase : Optional[Any] = value.shape[0]
assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` stores the fused weight in K, V, Q order despite the "qkv" naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
_UpperCamelCase : str = torch.split(lowercase_ ,depth // 3 ,dim=0 )
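            # e.g. a fused weight of shape (3 * hidden, hidden) yields three
            # (hidden, hidden) chunks here, one per projection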
_UpperCamelCase : int = q
_UpperCamelCase : Optional[Any] = k
_UpperCamelCase : int = v
del sd[key]
return sd
@torch.no_grad()
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_=None ) -> int:
"""simple docstring"""
_UpperCamelCase : List[Any] = load_checkpoint(lowercase_ )
if config is not None:
_UpperCamelCase : Tuple = OPTConfig.from_pretrained(lowercase_ )
else:
_UpperCamelCase : Union[str, Any] = OPTConfig()
_UpperCamelCase : List[Any] = OPTModel(lowercase_ ).half().eval()
model.load_state_dict(lowercase_ )
    # Save the converted model
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
model.save_pretrained(lowercase_ )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fairseq_path",
type=str,
help=(
"path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
" https://huggingface.co/models?other=opt_metasq"
),
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
lowerCamelCase__ = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :torch.FloatTensor
class __SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , __a : Dict=3 , __a : Any=3 , __a : Union[str, Any]=("DownEncoderBlock2D",) , __a : Optional[int]=(64,) , __a : int=2 , __a : Tuple=32 , __a : int="silu" , __a : str=True , ) -> Dict:
super().__init__()
_UpperCamelCase : List[str] = layers_per_block
_UpperCamelCase : Dict = torch.nn.Convad(
__a , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
_UpperCamelCase : int = None
_UpperCamelCase : Any = nn.ModuleList([] )
# down
_UpperCamelCase : List[str] = block_out_channels[0]
for i, down_block_type in enumerate(__a ):
_UpperCamelCase : Tuple = output_channel
_UpperCamelCase : int = block_out_channels[i]
_UpperCamelCase : int = i == len(__a ) - 1
_UpperCamelCase : Dict = get_down_block(
__a , num_layers=self.layers_per_block , in_channels=__a , out_channels=__a , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=__a , resnet_groups=__a , attention_head_dim=__a , temb_channels=__a , )
self.down_blocks.append(__a )
# mid
_UpperCamelCase : Union[str, Any] = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__a , output_scale_factor=1 , resnet_time_scale_shift="default" , attention_head_dim=block_out_channels[-1] , resnet_groups=__a , temb_channels=__a , )
# out
_UpperCamelCase : Any = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=__a , eps=1e-6 )
_UpperCamelCase : Any = nn.SiLU()
_UpperCamelCase : Union[str, Any] = 2 * out_channels if double_z else out_channels
_UpperCamelCase : Tuple = nn.Convad(block_out_channels[-1] , __a , 3 , padding=1 )
_UpperCamelCase : Optional[int] = False
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : Dict ) -> List[str]:
_UpperCamelCase : int = x
_UpperCamelCase : Optional[int] = self.conv_in(__a )
if self.training and self.gradient_checkpointing:
def create_custom_forward(__a : Tuple ):
def custom_forward(*__a : Any ):
return module(*__a )
return custom_forward
# down
if is_torch_version(">=" , "1.11.0" ):
for down_block in self.down_blocks:
_UpperCamelCase : Optional[int] = torch.utils.checkpoint.checkpoint(
create_custom_forward(__a ) , __a , use_reentrant=__a )
# middle
_UpperCamelCase : Tuple = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __a , use_reentrant=__a )
else:
for down_block in self.down_blocks:
_UpperCamelCase : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(__a ) , __a )
# middle
_UpperCamelCase : int = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , __a )
else:
# down
for down_block in self.down_blocks:
_UpperCamelCase : int = down_block(__a )
# middle
_UpperCamelCase : int = self.mid_block(__a )
# post-process
_UpperCamelCase : Any = self.conv_norm_out(__a )
_UpperCamelCase : Any = self.conv_act(__a )
_UpperCamelCase : Optional[Any] = self.conv_out(__a )
return sample
class __SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , __a : int=3 , __a : Any=3 , __a : str=("UpDecoderBlock2D",) , __a : Optional[int]=(64,) , __a : int=2 , __a : Optional[int]=32 , __a : Tuple="silu" , __a : Union[str, Any]="group" , ) -> str:
super().__init__()
_UpperCamelCase : List[Any] = layers_per_block
_UpperCamelCase : Tuple = nn.Convad(
__a , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
_UpperCamelCase : List[str] = None
_UpperCamelCase : Dict = nn.ModuleList([] )
_UpperCamelCase : List[Any] = in_channels if norm_type == "spatial" else None
# mid
_UpperCamelCase : Optional[Any] = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__a , output_scale_factor=1 , resnet_time_scale_shift="default" if norm_type == "group" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=__a , temb_channels=__a , )
# up
_UpperCamelCase : List[str] = list(reversed(__a ) )
_UpperCamelCase : int = reversed_block_out_channels[0]
for i, up_block_type in enumerate(__a ):
_UpperCamelCase : int = output_channel
_UpperCamelCase : Union[str, Any] = reversed_block_out_channels[i]
_UpperCamelCase : Optional[Any] = i == len(__a ) - 1
_UpperCamelCase : Union[str, Any] = get_up_block(
__a , num_layers=self.layers_per_block + 1 , in_channels=__a , out_channels=__a , prev_output_channel=__a , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=__a , resnet_groups=__a , attention_head_dim=__a , temb_channels=__a , resnet_time_scale_shift=__a , )
self.up_blocks.append(__a )
_UpperCamelCase : Optional[Any] = output_channel
# out
if norm_type == "spatial":
_UpperCamelCase : Optional[int] = SpatialNorm(block_out_channels[0] , __a )
else:
_UpperCamelCase : Optional[int] = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=__a , eps=1e-6 )
_UpperCamelCase : str = nn.SiLU()
_UpperCamelCase : str = nn.Convad(block_out_channels[0] , __a , 3 , padding=1 )
_UpperCamelCase : Dict = False
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : List[Any] , __a : Union[str, Any]=None ) -> Tuple:
_UpperCamelCase : List[str] = z
_UpperCamelCase : Dict = self.conv_in(__a )
_UpperCamelCase : Any = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(__a : Any ):
def custom_forward(*__a : Tuple ):
return module(*__a )
return custom_forward
if is_torch_version(">=" , "1.11.0" ):
# middle
_UpperCamelCase : str = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __a , __a , use_reentrant=__a )
_UpperCamelCase : Optional[int] = sample.to(__a )
# up
for up_block in self.up_blocks:
_UpperCamelCase : List[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(__a ) , __a , __a , use_reentrant=__a )
else:
# middle
_UpperCamelCase : Optional[int] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __a , __a )
_UpperCamelCase : Union[str, Any] = sample.to(__a )
# up
for up_block in self.up_blocks:
_UpperCamelCase : str = torch.utils.checkpoint.checkpoint(create_custom_forward(__a ) , __a , __a )
else:
# middle
_UpperCamelCase : str = self.mid_block(__a , __a )
_UpperCamelCase : int = sample.to(__a )
# up
for up_block in self.up_blocks:
_UpperCamelCase : Any = up_block(__a , __a )
# post-process
if latent_embeds is None:
_UpperCamelCase : List[str] = self.conv_norm_out(__a )
else:
_UpperCamelCase : Optional[int] = self.conv_norm_out(__a , __a )
_UpperCamelCase : Tuple = self.conv_act(__a )
_UpperCamelCase : List[Any] = self.conv_out(__a )
return sample
class __SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , __a : Tuple , __a : List[str] , __a : List[str] , __a : str=None , __a : Optional[int]="random" , __a : Any=False , __a : Optional[Any]=True ) -> List[Any]:
super().__init__()
_UpperCamelCase : Tuple = n_e
_UpperCamelCase : Tuple = vq_embed_dim
_UpperCamelCase : Union[str, Any] = beta
_UpperCamelCase : str = legacy
_UpperCamelCase : Dict = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
_UpperCamelCase : Any = remap
if self.remap is not None:
self.register_buffer("used" , torch.tensor(np.load(self.remap ) ) )
_UpperCamelCase : Dict = self.used.shape[0]
_UpperCamelCase : Optional[int] = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
_UpperCamelCase : Optional[int] = self.re_embed
_UpperCamelCase : Any = self.re_embed + 1
print(
F'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
F'''Using {self.unknown_index} for unknown indices.''' )
else:
_UpperCamelCase : Union[str, Any] = n_e
_UpperCamelCase : List[str] = sane_index_shape
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : Optional[Any] ) -> Optional[int]:
_UpperCamelCase : str = inds.shape
assert len(__a ) > 1
_UpperCamelCase : Union[str, Any] = inds.reshape(ishape[0] , -1 )
_UpperCamelCase : Optional[Any] = self.used.to(__a )
_UpperCamelCase : List[str] = (inds[:, :, None] == used[None, None, ...]).long()
_UpperCamelCase : Optional[Any] = match.argmax(-1 )
_UpperCamelCase : Any = match.sum(2 ) < 1
if self.unknown_index == "random":
_UpperCamelCase : Optional[int] = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
_UpperCamelCase : Dict = self.unknown_index
return new.reshape(__a )
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : Optional[int] ) -> Optional[int]:
_UpperCamelCase : int = inds.shape
assert len(__a ) > 1
_UpperCamelCase : List[Any] = inds.reshape(ishape[0] , -1 )
_UpperCamelCase : Optional[int] = self.used.to(__a )
if self.re_embed > self.used.shape[0]: # extra token
_UpperCamelCase : int = 0 # simply set to zero
_UpperCamelCase : Union[str, Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , __a )
return back.reshape(__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : str ) -> Optional[int]:
# reshape z -> (batch, height, width, channel) and flatten
_UpperCamelCase : List[str] = z.permute(0 , 2 , 3 , 1 ).contiguous()
_UpperCamelCase : int = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
_UpperCamelCase : Optional[int] = torch.argmin(torch.cdist(__a , self.embedding.weight ) , dim=1 )
_UpperCamelCase : int = self.embedding(__a ).view(z.shape )
_UpperCamelCase : str = None
_UpperCamelCase : Any = None
# compute loss for embedding
if not self.legacy:
_UpperCamelCase : List[str] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
_UpperCamelCase : str = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
_UpperCamelCase : List[str] = z + (z_q - z).detach()
# reshape back to match original input shape
_UpperCamelCase : Optional[Any] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
_UpperCamelCase : Tuple = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
_UpperCamelCase : Dict = self.remap_to_used(__a )
_UpperCamelCase : List[str] = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
_UpperCamelCase : str = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
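    # Toy version of the lookup + straight-through estimator performed in the
    # forward pass above (values invented for illustration):
    #   codebook = torch.tensor([[0.0, 0.0], [1.0, 1.0]])
    #   latents = torch.tensor([[0.1, -0.2], [0.9, 1.3]])
    #   z_q = codebook[torch.argmin(torch.cdist(latents, codebook), dim=1)]
    #   z_q = latents + (z_q - latents).detach()  # gradients bypass the quantizer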
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : List[str] , __a : str ) -> Any:
# shape specifying (batch, height, width, channel)
if self.remap is not None:
_UpperCamelCase : str = indices.reshape(shape[0] , -1 ) # add batch axis
_UpperCamelCase : str = self.unmap_to_all(__a )
_UpperCamelCase : int = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
_UpperCamelCase : Optional[int] = self.embedding(__a )
if shape is not None:
_UpperCamelCase : Tuple = z_q.view(__a )
# reshape back to match original input shape
_UpperCamelCase : Tuple = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
def __init__( self : Optional[int] , __a : List[str] , __a : Optional[Any]=False ) -> int:
_UpperCamelCase : Dict = parameters
_UpperCamelCase, _UpperCamelCase : str = torch.chunk(__a , 2 , dim=1 )
_UpperCamelCase : Tuple = torch.clamp(self.logvar , -30.0 , 20.0 )
_UpperCamelCase : Union[str, Any] = deterministic
_UpperCamelCase : Dict = torch.exp(0.5 * self.logvar )
_UpperCamelCase : Any = torch.exp(self.logvar )
if self.deterministic:
_UpperCamelCase : List[Any] = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : Optional[torch.Generator] = None ) -> torch.FloatTensor:
# make sure sample is on the same device as the parameters and has same dtype
_UpperCamelCase : List[Any] = randn_tensor(
self.mean.shape , generator=__a , device=self.parameters.device , dtype=self.parameters.dtype )
_UpperCamelCase : List[Any] = self.mean + self.std * sample
return x
def __SCREAMING_SNAKE_CASE ( self : Any , __a : List[str]=None ) -> List[Any]:
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def __SCREAMING_SNAKE_CASE ( self : str , __a : Tuple , __a : List[str]=[1, 2, 3] ) -> int:
if self.deterministic:
return torch.Tensor([0.0] )
_UpperCamelCase : List[str] = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
return self.mean
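# Standalone sanity check (synthetic values, hypothetical usage) of the
# closed-form KL used above: for q = N(mu, sigma^2) and p = N(0, I),
# KL(q || p) = 0.5 * sum(mu^2 + sigma^2 - 1 - log sigma^2).
if __name__ == "__main__":
    from torch.distributions import Normal, kl_divergence

    _mu = torch.randn(4)
    _logvar = torch.randn(4)
    _closed_form = 0.5 * torch.sum(_mu.pow(2) + _logvar.exp() - 1.0 - _logvar)
    _reference = kl_divergence(Normal(_mu, (0.5 * _logvar).exp()), Normal(0.0, 1.0)).sum()
    assert torch.allclose(_closed_form, _reference, atol=1e-5)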
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
_UpperCamelCase : Tuple = AutoConfig.from_pretrained(__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a , __a )
_UpperCamelCase : str = TFAutoModel.from_pretrained(__a , from_pt=__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a , __a )
_UpperCamelCase : List[str] = AutoModel.from_pretrained(__a , from_tf=__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a , __a )
@slow
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
_UpperCamelCase : Dict = AutoConfig.from_pretrained(__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a , __a )
_UpperCamelCase : Optional[int] = TFAutoModelForPreTraining.from_pretrained(__a , from_pt=__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a , __a )
_UpperCamelCase : Dict = AutoModelForPreTraining.from_pretrained(__a , from_tf=__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a , __a )
@slow
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict:
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase : Any = AutoConfig.from_pretrained(__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a , __a )
_UpperCamelCase : Union[str, Any] = TFAutoModelForCausalLM.from_pretrained(__a , from_pt=__a )
_UpperCamelCase : int = TFAutoModelForCausalLM.from_pretrained(
__a , output_loading_info=__a , from_pt=__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a , __a )
_UpperCamelCase : Optional[int] = AutoModelForCausalLM.from_pretrained(__a , from_tf=__a )
_UpperCamelCase : str = AutoModelForCausalLM.from_pretrained(
__a , output_loading_info=__a , from_tf=__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a , __a )
@slow
def __SCREAMING_SNAKE_CASE ( self : int ) -> Any:
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase : str = AutoConfig.from_pretrained(__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a , __a )
_UpperCamelCase : List[Any] = TFAutoModelWithLMHead.from_pretrained(__a , from_pt=__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a , __a )
_UpperCamelCase : List[str] = AutoModelWithLMHead.from_pretrained(__a , from_tf=__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a , __a )
@slow
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase : Optional[Any] = AutoConfig.from_pretrained(__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a , __a )
_UpperCamelCase : Union[str, Any] = TFAutoModelForMaskedLM.from_pretrained(__a , from_pt=__a )
_UpperCamelCase : List[str] = TFAutoModelForMaskedLM.from_pretrained(
__a , output_loading_info=__a , from_pt=__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a , __a )
_UpperCamelCase : List[Any] = AutoModelForMaskedLM.from_pretrained(__a , from_tf=__a )
_UpperCamelCase : Union[str, Any] = AutoModelForMaskedLM.from_pretrained(
__a , output_loading_info=__a , from_tf=__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a , __a )
@slow
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str:
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase : int = AutoConfig.from_pretrained(__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a , __a )
_UpperCamelCase : int = TFAutoModelForSeqaSeqLM.from_pretrained(__a , from_pt=__a )
_UpperCamelCase : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(
__a , output_loading_info=__a , from_pt=__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a , __a )
_UpperCamelCase : Tuple = AutoModelForSeqaSeqLM.from_pretrained(__a , from_tf=__a )
_UpperCamelCase : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained(
__a , output_loading_info=__a , from_tf=__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a , __a )
@slow
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
_UpperCamelCase : List[Any] = AutoConfig.from_pretrained(__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a , __a )
_UpperCamelCase : str = TFAutoModelForSequenceClassification.from_pretrained(__a , from_pt=__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a , __a )
_UpperCamelCase : Optional[int] = AutoModelForSequenceClassification.from_pretrained(__a , from_tf=__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a , __a )
@slow
def __SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
_UpperCamelCase : int = AutoConfig.from_pretrained(__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a , __a )
_UpperCamelCase : List[Any] = TFAutoModelForQuestionAnswering.from_pretrained(__a , from_pt=__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a , __a )
_UpperCamelCase : str = AutoModelForQuestionAnswering.from_pretrained(__a , from_tf=__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a , __a )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
_UpperCamelCase : List[str] = TFAutoModelWithLMHead.from_pretrained(__a , from_pt=__a )
self.assertIsInstance(__a , __a )
        self.assertEqual(model.num_parameters() , 14_410 )
        self.assertEqual(model.num_parameters(only_trainable=__a ) , 14_410 )
_UpperCamelCase : Optional[int] = AutoModelWithLMHead.from_pretrained(__a , from_tf=__a )
self.assertIsInstance(__a , __a )
        self.assertEqual(model.num_parameters() , 14_410 )
        self.assertEqual(model.num_parameters(only_trainable=__a ) , 14_410 )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
_UpperCamelCase : Optional[int] = TFAutoModelWithLMHead.from_pretrained(__a , from_pt=__a )
self.assertIsInstance(__a , __a )
        self.assertEqual(model.num_parameters() , 14_410 )
        self.assertEqual(model.num_parameters(only_trainable=__a ) , 14_410 )
_UpperCamelCase : Any = AutoModelWithLMHead.from_pretrained(__a , from_tf=__a )
self.assertIsInstance(__a , __a )
        self.assertEqual(model.num_parameters() , 14_410 )
        self.assertEqual(model.num_parameters(only_trainable=__a ) , 14_410 )
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=_UpperCamelCase )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :str = field(default="summarization" , metadata={"include_in_asdict_even_if_is_default": True} )
SCREAMING_SNAKE_CASE__ :ClassVar[Features] = Features({"text": Value("string" )} )
SCREAMING_SNAKE_CASE__ :ClassVar[Features] = Features({"summary": Value("string" )} )
SCREAMING_SNAKE_CASE__ :str = "text"
SCREAMING_SNAKE_CASE__ :str = "summary"
@property
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict[str, str]:
return {self.text_column: "text", self.summary_column: "summary"}
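# Hypothetical illustration of how such a template remaps dataset columns,
# assuming the upstream class name `Summarization` and a dataset with
# "article"/"highlights" columns:
#
#   template = Summarization(text_column="article", summary_column="highlights")
#   template.column_mapping  # -> {"article": "text", "highlights": "summary"}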
"""simple docstring"""
import argparse
import json
from tqdm import tqdm
def main() -> None:
    """simple docstring"""
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--src_path", type=str, default="biencoder-nq-dev.json", help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set", type=str, help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path", type=str, help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()
    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")
if __name__ == "__main__":
main()
"""simple docstring"""
def matching_min_vertex_cover(graph: dict) -> set:
    """simple docstring"""
    chosen_vertices = set()
    # edges = list of graph's edges
    edges = get_edges(graph)
    # While there are still elements in edges list, take an arbitrary edge
    # (from_node, to_node) and add his extremity to chosen_vertices and then
    # remove all arcs adjacent to the from_node and to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    """simple docstring"""
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
"""simple docstring"""
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
_UpperCamelCase : Tuple = tempfile.mkdtemp()
_UpperCamelCase : str = 5
# Realm tok
_UpperCamelCase : Tuple = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"test",
"question",
"this",
"is",
"the",
"first",
"second",
"third",
"fourth",
"fifth",
"record",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
_UpperCamelCase : Optional[int] = os.path.join(self.tmpdirname , "realm_tokenizer" )
os.makedirs(__a , exist_ok=__a )
_UpperCamelCase : Optional[Any] = os.path.join(__a , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
_UpperCamelCase : Optional[int] = os.path.join(self.tmpdirname , "realm_block_records" )
os.makedirs(__a , exist_ok=__a )
def __SCREAMING_SNAKE_CASE ( self : str ) -> RealmTokenizer:
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , "realm_tokenizer" ) )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict:
shutil.rmtree(self.tmpdirname )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
_UpperCamelCase : Optional[Any] = RealmConfig(num_block_records=self.num_block_records )
return config
def __SCREAMING_SNAKE_CASE ( self : int ) -> int:
_UpperCamelCase : Any = Dataset.from_dict(
{
"id": ["0", "1"],
"question": ["foo", "bar"],
"answers": [["Foo", "Bar"], ["Bar"]],
} )
return dataset
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
_UpperCamelCase : int = np.array(
[
b"This is the first record",
b"This is the second record",
b"This is the third record",
b"This is the fourth record",
b"This is the fifth record",
b"This is a longer longer longer record",
] , dtype=__a , )
return block_records
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
_UpperCamelCase : List[str] = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
_UpperCamelCase : Tuple = self.get_config()
_UpperCamelCase : int = self.get_dummy_retriever()
_UpperCamelCase : Tuple = retriever.tokenizer
_UpperCamelCase : List[str] = np.array([0, 3] , dtype="long" )
_UpperCamelCase : Union[str, Any] = tokenizer(["Test question"] ).input_ids
_UpperCamelCase : List[str] = tokenizer(
["the fourth"] , add_special_tokens=__a , return_token_type_ids=__a , return_attention_mask=__a , ).input_ids
_UpperCamelCase : str = config.reader_seq_len
_UpperCamelCase : List[Any] = retriever(
__a , __a , answer_ids=__a , max_length=__a , return_tensors="np" )
self.assertEqual(len(__a ) , 2 )
self.assertEqual(len(__a ) , 2 )
self.assertEqual(len(__a ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"] , )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
_UpperCamelCase : Any = self.get_config()
_UpperCamelCase : Dict = self.get_dummy_retriever()
_UpperCamelCase : Dict = retriever.tokenizer
_UpperCamelCase : List[Any] = np.array([0, 3, 5] , dtype="long" )
_UpperCamelCase : Optional[int] = tokenizer(["Test question"] ).input_ids
_UpperCamelCase : str = tokenizer(
["the fourth", "longer longer"] , add_special_tokens=__a , return_token_type_ids=__a , return_attention_mask=__a , ).input_ids
_UpperCamelCase : Union[str, Any] = config.reader_seq_len
_UpperCamelCase : Optional[Any] = retriever(
__a , __a , answer_ids=__a , max_length=__a , return_tensors="np" )
self.assertEqual([False, True, True] , __a )
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , __a )
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , __a )
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
_UpperCamelCase : List[Any] = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) )
# Test local path
_UpperCamelCase : int = retriever.from_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) )
self.assertEqual(retriever.block_records[0] , b"This is the first record" )
# Test mocked remote path
with patch("transformers.models.realm.retrieval_realm.hf_hub_download" ) as mock_hf_hub_download:
_UpperCamelCase : List[Any] = os.path.join(
os.path.join(self.tmpdirname , "realm_block_records" ) , _REALM_BLOCK_RECORDS_FILENAME )
_UpperCamelCase : int = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa" )
self.assertEqual(retriever.block_records[0] , b"This is the first record" )
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase__ = {
"configuration_owlvit": [
"OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"OwlViTConfig",
"OwlViTOnnxConfig",
"OwlViTTextConfig",
"OwlViTVisionConfig",
],
"processing_owlvit": ["OwlViTProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["OwlViTFeatureExtractor"]
lowerCamelCase__ = ["OwlViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"OwlViTModel",
"OwlViTPreTrainedModel",
"OwlViTTextModel",
"OwlViTVisionModel",
"OwlViTForObjectDetection",
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
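# What the lazy module buys (hypothetical interactive session):
#
#   import transformers.models.owlvit as owlvit  # cheap: heavy deps not imported yet
#   owlvit.OwlViTProcessor                       # first attribute access triggers the real import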
"""simple docstring"""
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def mobius(number: int) -> int:
    """simple docstring"""
    factors = prime_factors(number)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
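# Spot checks that follow directly from the definition of the Möbius function:
if __name__ == "__main__":
    assert mobius(1) == 1    # empty product, square-free
    assert mobius(6) == 1    # 2 * 3, even number of prime factors
    assert mobius(30) == -1  # 2 * 3 * 5, odd number of prime factors
    assert mobius(4) == 0    # divisible by a square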
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1_000_000, t_max: int = 10) -> int:
    """simple docstring"""
    count: defaultdict = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width * outer_width - t_limit)), 1)
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= t_max)
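# Worked example: an outer 8x8 square with a 4x4 hole uses 8*8 - 4*4 = 48 tiles,
# so that lamina adds one to count[48] above.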
if __name__ == "__main__":
print(f"""{solution() = }""")
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
lowerCamelCase__ = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]}
if TYPE_CHECKING:
from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
lowerCamelCase__ = TypeVar("KEY")
lowerCamelCase__ = TypeVar("VAL")
@dataclass(frozen=_UpperCamelCase , slots=_UpperCamelCase )
class __SCREAMING_SNAKE_CASE ( Generic[KEY, VAL] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :KEY
SCREAMING_SNAKE_CASE__ :VAL
class __SCREAMING_SNAKE_CASE ( _Item ):
'''simple docstring'''
def __init__( self : List[str] ) -> None:
super().__init__(__a , __a )
def __bool__( self : Dict ) -> bool:
return False
lowerCamelCase__ = _DeletedItem()
class __SCREAMING_SNAKE_CASE ( MutableMapping[KEY, VAL] ):
'''simple docstring'''
def __init__( self : int , __a : int = 8 , __a : float = 0.75 ) -> None:
_UpperCamelCase : str = initial_block_size
_UpperCamelCase : list[_Item | None] = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
_UpperCamelCase : List[str] = capacity_factor
_UpperCamelCase : Dict = 0
def __SCREAMING_SNAKE_CASE ( self : int , __a : KEY ) -> int:
return hash(__a ) % len(self._buckets )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : int ) -> int:
return (ind + 1) % len(self._buckets )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : int , __a : KEY , __a : VAL ) -> bool:
_UpperCamelCase : List[Any] = self._buckets[ind]
if not stored:
_UpperCamelCase : Tuple = _Item(__a , __a )
self._len += 1
return True
elif stored.key == key:
_UpperCamelCase : Union[str, Any] = _Item(__a , __a )
return True
else:
return False
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> bool:
_UpperCamelCase : Any = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(__a )
def __SCREAMING_SNAKE_CASE ( self : str ) -> bool:
if len(self._buckets ) <= self._initial_block_size:
return False
_UpperCamelCase : List[str] = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : int ) -> None:
_UpperCamelCase : Any = self._buckets
_UpperCamelCase : List[Any] = [None] * new_size
_UpperCamelCase : List[str] = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def __SCREAMING_SNAKE_CASE ( self : int ) -> None:
self._resize(len(self._buckets ) * 2 )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> None:
self._resize(len(self._buckets ) // 2 )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : KEY ) -> Iterator[int]:
_UpperCamelCase : str = self._get_bucket_index(__a )
for _ in range(len(self._buckets ) ):
yield ind
_UpperCamelCase : Tuple = self._get_next_ind(__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : KEY , __a : VAL ) -> None:
for ind in self._iterate_buckets(__a ):
if self._try_set(__a , __a , __a ):
break
def __setitem__( self : int , __a : KEY , __a : VAL ) -> None:
if self._is_full():
self._size_up()
self._add_item(__a , __a )
def __delitem__( self : str , __a : KEY ) -> None:
for ind in self._iterate_buckets(__a ):
_UpperCamelCase : Tuple = self._buckets[ind]
if item is None:
raise KeyError(__a )
if item is _deleted:
continue
if item.key == key:
_UpperCamelCase : List[Any] = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self : str , __a : KEY ) -> VAL:
for ind in self._iterate_buckets(__a ):
_UpperCamelCase : Tuple = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(__a )
def __len__( self : List[Any] ) -> int:
return self._len
def __iter__( self : List[str] ) -> Iterator[KEY]:
yield from (item.key for item in self._buckets if item)
def __repr__( self : List[str] ) -> str:
_UpperCamelCase : Optional[int] = " ,".join(
F'''{item.key}: {item.val}''' for item in self._buckets if item )
return F'''HashMap({val_string})'''
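# Quick usage sketch of the open-addressing map above:
if __name__ == "__main__":
    hash_map = HashMap(initial_block_size=4)
    hash_map["a"] = 1
    hash_map["b"] = 2                      # may trigger _size_up() via __setitem__
    assert hash_map["a"] == 1 and len(hash_map) == 2
    del hash_map["a"]                      # tombstoned with the _deleted sentinel
    assert "a" not in hash_map and len(hash_map) == 1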
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Optional[int] = MgpstrTokenizer
SCREAMING_SNAKE_CASE__ :Tuple = False
SCREAMING_SNAKE_CASE__ :Tuple = {}
SCREAMING_SNAKE_CASE__ :Optional[int] = False
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple:
super().setUp()
# fmt: off
_UpperCamelCase : Tuple = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
# fmt: on
_UpperCamelCase : List[str] = dict(zip(__a , range(len(__a ) ) ) )
_UpperCamelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__a ) + "\n" )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , **__a : Any ) -> Optional[int]:
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : Optional[int] ) -> Tuple:
_UpperCamelCase : Tuple = "tester"
_UpperCamelCase : Optional[int] = "tester"
return input_text, output_text
@unittest.skip("MGP-STR always lower cases letters." )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Any:
pass
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
_UpperCamelCase : Optional[int] = self.get_tokenizers(do_lower_case=__a )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_UpperCamelCase : Tuple = "[SPECIAL_TOKEN]"
tokenizer.add_special_tokens({"cls_token": special_token} )
_UpperCamelCase : List[str] = tokenizer.encode([special_token] , add_special_tokens=__a )
self.assertEqual(len(__a ) , 1 )
_UpperCamelCase : str = tokenizer.decode(__a , skip_special_tokens=__a )
self.assertTrue(special_token not in decoded )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict:
_UpperCamelCase : int = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_UpperCamelCase : Dict = self.get_input_output_texts(__a )
_UpperCamelCase : Tuple = tokenizer.tokenize(__a )
_UpperCamelCase : Dict = tokenizer.convert_tokens_to_ids(__a )
_UpperCamelCase : str = tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
_UpperCamelCase : Dict = tokenizer.convert_ids_to_tokens(__a )
self.assertNotEqual(len(__a ) , 0 )
_UpperCamelCase : Dict = tokenizer.decode(__a )
self.assertIsInstance(__a , __a )
self.assertEqual(text_a.replace(" " , "" ) , __a )
@unittest.skip("MGP-STR tokenizer only handles one sequence." )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
pass
@unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" )
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
pass
"""simple docstring"""
class PrefixSum:
    '''simple docstring'''

    def __init__(self, array: list[int]) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
            for i in range(1, len_array):
                self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
if __name__ == "__main__":
import doctest
doctest.testmod()
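# Short usage sketch (hypothetical values) of the prefix-sum class above:
if __name__ == "__main__":
    ps = PrefixSum([1, 2, 3, 4])
    assert ps.get_sum(1, 3) == 9     # 2 + 3 + 4
    assert ps.contains_sum(5)        # the contiguous subarray 2 + 3
    assert not ps.contains_sum(100)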
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = OrderedDict(
[
("align", "EfficientNetImageProcessor"),
("beit", "BeitImageProcessor"),
("bit", "BitImageProcessor"),
("blip", "BlipImageProcessor"),
("blip-2", "BlipImageProcessor"),
("bridgetower", "BridgeTowerImageProcessor"),
("chinese_clip", "ChineseCLIPImageProcessor"),
("clip", "CLIPImageProcessor"),
("clipseg", "ViTImageProcessor"),
("conditional_detr", "ConditionalDetrImageProcessor"),
("convnext", "ConvNextImageProcessor"),
("convnextv2", "ConvNextImageProcessor"),
("cvt", "ConvNextImageProcessor"),
("data2vec-vision", "BeitImageProcessor"),
("deformable_detr", "DeformableDetrImageProcessor"),
("deit", "DeiTImageProcessor"),
("deta", "DetaImageProcessor"),
("detr", "DetrImageProcessor"),
("dinat", "ViTImageProcessor"),
("donut-swin", "DonutImageProcessor"),
("dpt", "DPTImageProcessor"),
("efficientformer", "EfficientFormerImageProcessor"),
("efficientnet", "EfficientNetImageProcessor"),
("flava", "FlavaImageProcessor"),
("focalnet", "BitImageProcessor"),
("git", "CLIPImageProcessor"),
("glpn", "GLPNImageProcessor"),
("groupvit", "CLIPImageProcessor"),
("imagegpt", "ImageGPTImageProcessor"),
("instructblip", "BlipImageProcessor"),
("layoutlmv2", "LayoutLMv2ImageProcessor"),
("layoutlmv3", "LayoutLMv3ImageProcessor"),
("levit", "LevitImageProcessor"),
("mask2former", "Mask2FormerImageProcessor"),
("maskformer", "MaskFormerImageProcessor"),
("mgp-str", "ViTImageProcessor"),
("mobilenet_v1", "MobileNetV1ImageProcessor"),
("mobilenet_v2", "MobileNetV2ImageProcessor"),
("mobilevit", "MobileViTImageProcessor"),
("mobilevit", "MobileViTImageProcessor"),
("mobilevitv2", "MobileViTImageProcessor"),
("nat", "ViTImageProcessor"),
("oneformer", "OneFormerImageProcessor"),
("owlvit", "OwlViTImageProcessor"),
("perceiver", "PerceiverImageProcessor"),
("pix2struct", "Pix2StructImageProcessor"),
("poolformer", "PoolFormerImageProcessor"),
("regnet", "ConvNextImageProcessor"),
("resnet", "ConvNextImageProcessor"),
("sam", "SamImageProcessor"),
("segformer", "SegformerImageProcessor"),
("swiftformer", "ViTImageProcessor"),
("swin", "ViTImageProcessor"),
("swin2sr", "Swin2SRImageProcessor"),
("swinv2", "ViTImageProcessor"),
("table-transformer", "DetrImageProcessor"),
("timesformer", "VideoMAEImageProcessor"),
("tvlt", "TvltImageProcessor"),
("upernet", "SegformerImageProcessor"),
("van", "ConvNextImageProcessor"),
("videomae", "VideoMAEImageProcessor"),
("vilt", "ViltImageProcessor"),
("vit", "ViTImageProcessor"),
("vit_hybrid", "ViTHybridImageProcessor"),
("vit_mae", "ViTImageProcessor"),
("vit_msn", "ViTImageProcessor"),
("xclip", "CLIPImageProcessor"),
("yolos", "YolosImageProcessor"),
]
)
lowerCamelCase__ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def lowercase__ ( lowercase_ ) -> Dict:
"""simple docstring"""
for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
if class_name in extractors:
_UpperCamelCase : Tuple = model_type_to_module_name(lowercase_ )
_UpperCamelCase : Dict = importlib.import_module(F'''.{module_name}''' ,"transformers.models" )
try:
return getattr(lowercase_ ,lowercase_ )
except AttributeError:
continue
for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
if getattr(lowercase_ ,"__name__" ,lowercase_ ) == class_name:
return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
_UpperCamelCase : Tuple = importlib.import_module("transformers" )
if hasattr(lowercase_ ,lowercase_ ):
return getattr(lowercase_ ,lowercase_ )
return None
def lowercase__ ( lowercase_ ,lowercase_ = None ,lowercase_ = False ,lowercase_ = False ,lowercase_ = None ,lowercase_ = None ,lowercase_ = None ,lowercase_ = False ,**lowercase_ ,) -> Tuple:
"""simple docstring"""
_UpperCamelCase : Tuple = get_file_from_repo(
lowercase_ ,lowercase_ ,cache_dir=lowercase_ ,force_download=lowercase_ ,resume_download=lowercase_ ,proxies=lowercase_ ,use_auth_token=lowercase_ ,revision=lowercase_ ,local_files_only=lowercase_ ,)
if resolved_config_file is None:
logger.info(
"Could not locate the image processor configuration file, will try to use the model config instead." )
return {}
with open(lowercase_ ,encoding="utf-8" ) as reader:
return json.load(lowercase_ )
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : int ) -> List[Any]:
raise EnvironmentError(
"AutoImageProcessor is designed to be instantiated "
"using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method." )
@classmethod
@replace_list_option_in_docstrings(__a )
def __SCREAMING_SNAKE_CASE ( cls : str , __a : Tuple , **__a : Optional[int] ) -> Optional[Any]:
_UpperCamelCase : Tuple = kwargs.pop("config" , __a )
_UpperCamelCase : List[str] = kwargs.pop("trust_remote_code" , __a )
_UpperCamelCase : Dict = True
_UpperCamelCase : Union[str, Any] = ImageProcessingMixin.get_image_processor_dict(__a , **__a )
_UpperCamelCase : Optional[int] = config_dict.get("image_processor_type" , __a )
_UpperCamelCase : List[Any] = None
if "AutoImageProcessor" in config_dict.get("auto_map" , {} ):
_UpperCamelCase : List[str] = config_dict["auto_map"]["AutoImageProcessor"]
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
_UpperCamelCase : Dict = config_dict.pop("feature_extractor_type" , __a )
if feature_extractor_class is not None:
logger.warning(
"Could not find image processor class in the image processor config or the model config. Loading"
" based on pattern matching with the model's feature extractor configuration." )
_UpperCamelCase : List[str] = feature_extractor_class.replace("FeatureExtractor" , "ImageProcessor" )
if "AutoFeatureExtractor" in config_dict.get("auto_map" , {} ):
_UpperCamelCase : Tuple = config_dict["auto_map"]["AutoFeatureExtractor"]
_UpperCamelCase : Optional[Any] = feature_extractor_auto_map.replace("FeatureExtractor" , "ImageProcessor" )
logger.warning(
"Could not find image processor auto map in the image processor config or the model config."
" Loading based on pattern matching with the model's feature extractor configuration." )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(__a , __a ):
_UpperCamelCase : Optional[int] = AutoConfig.from_pretrained(__a , **__a )
# It could be in `config.image_processor_type``
_UpperCamelCase : Dict = getattr(__a , "image_processor_type" , __a )
if hasattr(__a , "auto_map" ) and "AutoImageProcessor" in config.auto_map:
_UpperCamelCase : Tuple = config.auto_map["AutoImageProcessor"]
if image_processor_class is not None:
_UpperCamelCase : Optional[int] = image_processor_class_from_name(__a )
_UpperCamelCase : Any = image_processor_auto_map is not None
_UpperCamelCase : str = image_processor_class is not None or type(__a ) in IMAGE_PROCESSOR_MAPPING
_UpperCamelCase : int = resolve_trust_remote_code(
__a , __a , __a , __a )
if has_remote_code and trust_remote_code:
_UpperCamelCase : Dict = get_class_from_dynamic_module(
__a , __a , **__a )
_UpperCamelCase : int = kwargs.pop("code_revision" , __a )
if os.path.isdir(__a ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(__a , **__a )
elif image_processor_class is not None:
return image_processor_class.from_dict(__a , **__a )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(__a ) in IMAGE_PROCESSOR_MAPPING:
_UpperCamelCase : Optional[int] = IMAGE_PROCESSOR_MAPPING[type(__a )]
return image_processor_class.from_dict(__a , **__a )
raise ValueError(
F'''Unrecognized image processor in {pretrained_model_name_or_path}. Should have a '''
F'''`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following '''
F'''`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}''' )
@staticmethod
def __SCREAMING_SNAKE_CASE ( __a : List[str] , __a : Optional[int] ) -> str:
IMAGE_PROCESSOR_MAPPING.register(__a , __a )
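# Typical usage sketch (the checkpoint id below is illustrative):
#
#   from transformers import AutoImageProcessor
#   image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")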
"""simple docstring"""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """simple docstring"""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"
    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"
    result = requests.get(url, headers=headers).json()
    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """simple docstring"""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break
    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """simple docstring"""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """simple docstring"""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)
    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")
    return results
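# Hypothetical driver (requires a GitHub token with read access to the repo;
# the artifact name and output directory are invented for illustration):
#
#   reports = get_last_daily_ci_reports(
#       artifact_names=["ci_results"], output_dir="ci_artifacts", token="<GITHUB_TOKEN>"
#   )
#   for name, files in reports.items():
#       print(name, sorted(files))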
"""simple docstring"""
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
SCREAMING_SNAKE_CASE__ :Optional[Any] = VQModel
SCREAMING_SNAKE_CASE__ :List[Any] = "sample"
@property
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : Tuple=(32, 32) ) -> int:
_UpperCamelCase : List[Any] = 4
_UpperCamelCase : Dict = 3
_UpperCamelCase : Any = floats_tensor((batch_size, num_channels) + sizes ).to(__a )
return {"sample": image}
@property
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
return (3, 32, 32)
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
return (3, 32, 32)
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]:
_UpperCamelCase : Tuple = {
"block_out_channels": [32, 64],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 3,
}
_UpperCamelCase : List[Any] = self.dummy_input
return init_dict, inputs_dict
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple:
pass
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
pass
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple:
_UpperCamelCase : Any = VQModel.from_pretrained("fusing/vqgan-dummy" , output_loading_info=__a )
self.assertIsNotNone(__a )
self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
model.to(__a )
_UpperCamelCase : List[str] = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any:
_UpperCamelCase : Dict = VQModel.from_pretrained("fusing/vqgan-dummy" )
model.to(__a ).eval()
torch.manual_seed(0 )
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0 )
_UpperCamelCase : Dict = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size )
_UpperCamelCase : str = image.to(__a )
with torch.no_grad():
_UpperCamelCase : List[str] = model(__a ).sample
_UpperCamelCase : str = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
        _UpperCamelCase : str = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143] )
# fmt: on
self.assertTrue(torch.allclose(__a , __a , atol=1e-3 ) )
"""simple docstring"""
import math
class SelfOrganizingMap:
    '''simple docstring'''

    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow((sample[i] - weights[0][i]), 2)
            d1 += math.pow((sample[i] - weights[1][i]), 2)
        # the cluster whose weight vector is closer to the sample wins
        return 0 if d0 > d1 else 1

    def update(
        self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float
    ) -> list[list[int | float]]:
        for i in range(len(weights)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    """simple docstring"""
    # training examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)
            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)
    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")


# running the main() function
if __name__ == "__main__":
    main()
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
def lowercase__ ( lowercase_ ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase : List[str] = SwinConfig(
embed_dim=192 ,depths=(2, 2, 18, 2) ,num_heads=(6, 12, 24, 48) ,window_size=12 ,out_features=["stage2", "stage3", "stage4"] ,)
_UpperCamelCase : List[Any] = DetaConfig(
backbone_config=lowercase_ ,num_queries=900 ,encoder_ffn_dim=2_048 ,decoder_ffn_dim=2_048 ,num_feature_levels=5 ,assign_first_stage=lowercase_ ,with_box_refine=lowercase_ ,two_stage=lowercase_ ,)
# set labels
_UpperCamelCase : int = "huggingface/label-files"
if "o365" in model_name:
_UpperCamelCase : Optional[Any] = 366
_UpperCamelCase : List[str] = "object365-id2label.json"
else:
_UpperCamelCase : List[str] = 91
_UpperCamelCase : List[str] = "coco-detection-id2label.json"
_UpperCamelCase : Union[str, Any] = num_labels
_UpperCamelCase : Optional[Any] = json.load(open(cached_download(hf_hub_url(lowercase_ ,lowercase_ ,repo_type="dataset" ) ) ,"r" ) )
    _UpperCamelCase : Any = {int(k ): v for k, v in idalabel.items()}
_UpperCamelCase : Tuple = idalabel
_UpperCamelCase : List[Any] = {v: k for k, v in idalabel.items()}
return config
def lowercase__ ( lowercase_ ) -> str:
"""simple docstring"""
_UpperCamelCase : List[str] = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.patch_embed.proj.weight", "model.backbone.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.0.body.patch_embed.proj.bias", "model.backbone.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.0.body.patch_embed.norm.weight", "model.backbone.model.embeddings.norm.weight") )
rename_keys.append(("backbone.0.body.patch_embed.norm.bias", "model.backbone.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm1.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm1.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm2.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm2.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((F'''backbone.0.body.layers.{i}.downsample.reduction.weight''', F'''model.backbone.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.downsample.norm.weight''', F'''model.backbone.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.downsample.norm.bias''', F'''model.backbone.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append(("backbone.0.body.norm1.weight", "model.backbone.model.hidden_states_norms.stage2.weight") )
rename_keys.append(("backbone.0.body.norm1.bias", "model.backbone.model.hidden_states_norms.stage2.bias") )
rename_keys.append(("backbone.0.body.norm2.weight", "model.backbone.model.hidden_states_norms.stage3.weight") )
rename_keys.append(("backbone.0.body.norm2.bias", "model.backbone.model.hidden_states_norms.stage3.bias") )
rename_keys.append(("backbone.0.body.norm3.weight", "model.backbone.model.hidden_states_norms.stage4.weight") )
rename_keys.append(("backbone.0.body.norm3.bias", "model.backbone.model.hidden_states_norms.stage4.bias") )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight''', F'''model.encoder.layers.{i}.self_attn.sampling_offsets.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias''', F'''model.encoder.layers.{i}.self_attn.sampling_offsets.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.attention_weights.weight''', F'''model.encoder.layers.{i}.self_attn.attention_weights.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.attention_weights.bias''', F'''model.encoder.layers.{i}.self_attn.attention_weights.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.value_proj.weight''', F'''model.encoder.layers.{i}.self_attn.value_proj.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.value_proj.bias''', F'''model.encoder.layers.{i}.self_attn.value_proj.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.output_proj.weight''', F'''model.encoder.layers.{i}.self_attn.output_proj.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.output_proj.bias''', F'''model.encoder.layers.{i}.self_attn.output_proj.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.weight''', F'''model.encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''model.encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''model.encoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''model.encoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''model.encoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''model.encoder.layers.{i}.fc2.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''model.encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''model.encoder.layers.{i}.final_layer_norm.bias''') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight''', F'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias''', F'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.attention_weights.weight''', F'''model.decoder.layers.{i}.encoder_attn.attention_weights.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.attention_weights.bias''', F'''model.decoder.layers.{i}.encoder_attn.attention_weights.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.value_proj.weight''', F'''model.decoder.layers.{i}.encoder_attn.value_proj.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.value_proj.bias''', F'''model.decoder.layers.{i}.encoder_attn.value_proj.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.output_proj.weight''', F'''model.decoder.layers.{i}.encoder_attn.output_proj.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.output_proj.bias''', F'''model.decoder.layers.{i}.encoder_attn.output_proj.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.weight''', F'''model.decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''model.decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''model.decoder.layers.{i}.self_attn.out_proj.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''model.decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm2.weight''', F'''model.decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm2.bias''', F'''model.decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''model.decoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''model.decoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''model.decoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''model.decoder.layers.{i}.fc2.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''model.decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''model.decoder.layers.{i}.final_layer_norm.bias''') )
# fmt: on
return rename_keys
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase : List[Any] = dct.pop(lowercase_ )
_UpperCamelCase : Optional[Any] = val
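# `rename_key` simply pops a tensor and re-inserts it under its new name; a
# hypothetical call (key taken from the rename table above) would be:
#
#   rename_key(state_dict, "transformer.encoder.layers.0.norm1.weight",
#              "model.encoder.layers.0.self_attn_layer_norm.weight")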
def lowercase__ ( lowercase_ ,lowercase_ ) -> Tuple:
"""simple docstring"""
_UpperCamelCase : Dict = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
_UpperCamelCase : List[str] = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
_UpperCamelCase : Optional[Any] = state_dict.pop(F'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight''' )
_UpperCamelCase : Optional[int] = state_dict.pop(F'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
            _UpperCamelCase : int = in_proj_weight[:dim, :]
            _UpperCamelCase : Any = in_proj_bias[:dim]
            _UpperCamelCase : List[str] = in_proj_weight[dim : dim * 2, :]
            _UpperCamelCase : Union[str, Any] = in_proj_bias[dim : dim * 2]
            _UpperCamelCase : List[Any] = in_proj_weight[-dim:, :]
            _UpperCamelCase : Optional[int] = in_proj_bias[-dim:]
# fmt: on
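# Minimal sketch (toy sizes, not part of the conversion) of how a fused qkv
# projection splits into query/key/value chunks, mirroring the slices above:
#
#   import torch
#   dim = 4
#   in_proj_weight = torch.randn(3 * dim, dim)  # rows stacked as [q; k; v]
#   q_w, k_w, v_w = in_proj_weight[:dim], in_proj_weight[dim : 2 * dim], in_proj_weight[-dim:]
#   assert torch.equal(torch.cat([q_w, k_w, v_w]), in_proj_weight)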
def lowercase__ ( lowercase_ ,lowercase_ ) -> int:
"""simple docstring"""
_UpperCamelCase : Optional[Any] = config.d_model
for i in range(config.decoder_layers ):
# read in weights + bias of input projection layer of self-attention
_UpperCamelCase : str = state_dict.pop(F'''transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
_UpperCamelCase : Optional[int] = state_dict.pop(F'''transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
_UpperCamelCase : List[str] = in_proj_weight[:hidden_size, :]
_UpperCamelCase : Dict = in_proj_bias[:hidden_size]
        _UpperCamelCase : Dict = in_proj_weight[hidden_size : hidden_size * 2, :]
        _UpperCamelCase : Any = in_proj_bias[hidden_size : hidden_size * 2]
_UpperCamelCase : int = in_proj_weight[-hidden_size:, :]
_UpperCamelCase : int = in_proj_bias[-hidden_size:]
def lowercase__ ( ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase : List[str] = "http://images.cocodataset.org/val2017/000000039769.jpg"
_UpperCamelCase : int = Image.open(requests.get(lowercase_ ,stream=lowercase_ ).raw )
return im
@torch.no_grad()
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase : List[Any] = get_deta_config(lowercase_ )
# load original state dict
if model_name == "deta-swin-large":
_UpperCamelCase : Optional[int] = hf_hub_download(repo_id="nielsr/deta-checkpoints" ,filename="adet_swin_ft.pth" )
elif model_name == "deta-swin-large-o365":
_UpperCamelCase : Any = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365" ,filename="deta_swin_pt_o365.pth" )
else:
raise ValueError(F'''Model name {model_name} not supported''' )
_UpperCamelCase : Dict = torch.load(lowercase_ ,map_location="cpu" )["model"]
# original state dict
for name, param in state_dict.items():
print(lowercase_ ,param.shape )
# rename keys
_UpperCamelCase : Dict = create_rename_keys(lowercase_ )
for src, dest in rename_keys:
rename_key(lowercase_ ,lowercase_ ,lowercase_ )
read_in_swin_q_k_v(lowercase_ ,config.backbone_config )
read_in_decoder_q_k_v(lowercase_ ,lowercase_ )
# fix some prefixes
for key in state_dict.copy().keys():
if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
_UpperCamelCase : Optional[int] = state_dict.pop(lowercase_ )
_UpperCamelCase : Tuple = val
if "input_proj" in key:
_UpperCamelCase : Union[str, Any] = state_dict.pop(lowercase_ )
_UpperCamelCase : Union[str, Any] = val
if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
_UpperCamelCase : List[Any] = state_dict.pop(lowercase_ )
_UpperCamelCase : Optional[Any] = val
# finally, create HuggingFace model and load state dict
_UpperCamelCase : Union[str, Any] = DetaForObjectDetection(lowercase_ )
model.load_state_dict(lowercase_ )
model.eval()
_UpperCamelCase : List[str] = "cuda" if torch.cuda.is_available() else "cpu"
model.to(lowercase_ )
# load image processor
_UpperCamelCase : int = DetaImageProcessor(format="coco_detection" )
# verify our conversion on image
_UpperCamelCase : Optional[int] = prepare_img()
_UpperCamelCase : Dict = processor(images=lowercase_ ,return_tensors="pt" )
_UpperCamelCase : Optional[Any] = encoding["pixel_values"]
_UpperCamelCase : Any = model(pixel_values.to(lowercase_ ) )
# verify logits
print("Logits:" ,outputs.logits[0, :3, :3] )
print("Boxes:" ,outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
_UpperCamelCase : Tuple = torch.tensor(
[[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]] )
_UpperCamelCase : List[Any] = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]] )
elif model_name == "deta-swin-large-o365":
_UpperCamelCase : Dict = torch.tensor(
[[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]] )
_UpperCamelCase : int = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]] )
assert torch.allclose(outputs.logits[0, :3, :3] ,expected_logits.to(lowercase_ ) ,atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] ,expected_boxes.to(lowercase_ ) ,atol=1e-4 )
print("Everything ok!" )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(F'''Saving PyTorch model and processor to {pytorch_dump_folder_path}...''' )
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
model.save_pretrained(lowercase_ )
processor.save_pretrained(lowercase_ )
# Push to hub
if push_to_hub:
print("Pushing model and processor to hub..." )
model.push_to_hub(F'''jozhang97/{model_name}''' )
processor.push_to_hub(F'''jozhang97/{model_name}''' )
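# Example invocation (the script name and paths are illustrative assumptions):
#
#   python convert_deta_swin_to_pytorch.py --model_name deta-swin-large \
#       --pytorch_dump_folder_path ./deta-swin-large --push_to_hub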
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
type=str,
default="deta-swin-large",
choices=["deta-swin-large", "deta-swin-large-o365"],
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
help="Path to the folder to output PyTorch model.",
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
lowerCamelCase__ = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 719
|
"""simple docstring"""
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
lowerCamelCase__ = "src/transformers"
lowerCamelCase__ = "docs/source/en"
lowerCamelCase__ = "."
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> List[str]:
"""simple docstring"""
with open(lowercase_ ,"r" ,encoding="utf-8" ,newline="\n" ) as f:
_UpperCamelCase : Union[str, Any] = f.readlines()
# Find the start prompt.
_UpperCamelCase : Dict = 0
while not lines[start_index].startswith(lowercase_ ):
start_index += 1
start_index += 1
_UpperCamelCase : Optional[int] = start_index
while not lines[end_index].startswith(lowercase_ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
lowerCamelCase__ = "Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names.
lowerCamelCase__ = re.compile(R"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
lowerCamelCase__ = re.compile(R"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
lowerCamelCase__ = re.compile(R"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# This is to make sure the transformers module imported is the one in the repo.
lowerCamelCase__ = direct_transformers_import(TRANSFORMERS_PATH)
def lowercase__ ( lowercase_ ) -> Any:
"""simple docstring"""
_UpperCamelCase : Tuple = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)" ,lowercase_ )
return [m.group(0 ) for m in matches]
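# e.g. camel_case_split("TFBertModel") -> ["TF", "Bert", "Model"]; the regex
# splits at lower-to-upper transitions and before the final capital of an
# acronym when it is followed by a lowercase letter.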
def lowercase__ ( lowercase_ ,lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase : str = 2 if text == "✅" or text == "❌" else len(lowercase_ )
_UpperCamelCase : Union[str, Any] = (width - text_length) // 2
_UpperCamelCase : Dict = width - text_length - left_indent
return " " * left_indent + text + " " * right_indent
def lowercase__ ( ) -> str:
"""simple docstring"""
_UpperCamelCase : Optional[Any] = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    _UpperCamelCase : str = {
        name: config_mapping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_mapping_names
    }
_UpperCamelCase : Dict = {name: config.replace("Config" ,"" ) for name, config in model_name_to_config.items()}
# Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
_UpperCamelCase : int = collections.defaultdict(lowercase_ )
_UpperCamelCase : Dict = collections.defaultdict(lowercase_ )
_UpperCamelCase : Dict = collections.defaultdict(lowercase_ )
_UpperCamelCase : int = collections.defaultdict(lowercase_ )
_UpperCamelCase : str = collections.defaultdict(lowercase_ )
# Let's lookup through all transformers object (once).
for attr_name in dir(lowercase_ ):
_UpperCamelCase : List[str] = None
if attr_name.endswith("Tokenizer" ):
_UpperCamelCase : Tuple = slow_tokenizers
_UpperCamelCase : Any = attr_name[:-9]
elif attr_name.endswith("TokenizerFast" ):
_UpperCamelCase : Optional[Any] = fast_tokenizers
_UpperCamelCase : List[str] = attr_name[:-13]
elif _re_tf_models.match(lowercase_ ) is not None:
_UpperCamelCase : List[Any] = tf_models
_UpperCamelCase : Dict = _re_tf_models.match(lowercase_ ).groups()[0]
elif _re_flax_models.match(lowercase_ ) is not None:
_UpperCamelCase : Dict = flax_models
_UpperCamelCase : Union[str, Any] = _re_flax_models.match(lowercase_ ).groups()[0]
elif _re_pt_models.match(lowercase_ ) is not None:
_UpperCamelCase : Optional[int] = pt_models
_UpperCamelCase : Any = _re_pt_models.match(lowercase_ ).groups()[0]
if lookup_dict is not None:
while len(lowercase_ ) > 0:
if attr_name in model_name_to_prefix.values():
_UpperCamelCase : Dict = True
break
# Try again after removing the last word in the name
_UpperCamelCase : List[str] = "".join(camel_case_split(lowercase_ )[:-1] )
# Let's build that table!
_UpperCamelCase : Any = list(model_name_to_config.keys() )
model_names.sort(key=str.lower )
_UpperCamelCase : List[str] = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
# We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
_UpperCamelCase : Union[str, Any] = [len(lowercase_ ) + 2 for c in columns]
_UpperCamelCase : Any = max([len(lowercase_ ) for name in model_names] ) + 2
# Build the table per se
_UpperCamelCase : Tuple = "|" + "|".join([_center_text(lowercase_ ,lowercase_ ) for c, w in zip(lowercase_ ,lowercase_ )] ) + "|\n"
# Use ":-----:" format to center-aligned table cell texts
table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths] ) + "|\n"
_UpperCamelCase : Union[str, Any] = {True: "✅", False: "❌"}
for name in model_names:
_UpperCamelCase : Optional[int] = model_name_to_prefix[name]
_UpperCamelCase : Tuple = [
name,
check[slow_tokenizers[prefix]],
check[fast_tokenizers[prefix]],
check[pt_models[prefix]],
check[tf_models[prefix]],
check[flax_models[prefix]],
]
table += "|" + "|".join([_center_text(lowercase_ ,lowercase_ ) for l, w in zip(lowercase_ ,lowercase_ )] ) + "|\n"
return table
def lowercase__ ( lowercase_=False ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : str = _find_text_in_file(
filename=os.path.join(lowercase_ ,"index.md" ) ,start_prompt="<!--This table is updated automatically from the auto modules" ,end_prompt="<!-- End table-->" ,)
_UpperCamelCase : Any = get_model_table_from_auto_modules()
if current_table != new_table:
if overwrite:
with open(os.path.join(lowercase_ ,"index.md" ) ,"w" ,encoding="utf-8" ,newline="\n" ) as f:
f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
else:
raise ValueError(
"The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this." )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
lowerCamelCase__ = parser.parse_args()
check_model_table(args.fix_and_overwrite)
| 51
| 0
|
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Union[str, Any] , __a : str , __a : Union[str, Any]=13 , __a : str=32 , __a : Any=3 , __a : List[str]=4 , __a : int=[10, 20, 30, 40] , __a : int=[2, 2, 3, 2] , __a : Union[str, Any]=True , __a : str=True , __a : Dict=37 , __a : Tuple="gelu" , __a : List[str]=10 , __a : List[str]=0.02 , __a : List[str]=["stage2", "stage3", "stage4"] , __a : Any=[2, 3, 4] , __a : List[Any]=None , ) -> Dict:
_UpperCamelCase : str = parent
_UpperCamelCase : Union[str, Any] = batch_size
_UpperCamelCase : Tuple = image_size
_UpperCamelCase : Any = num_channels
_UpperCamelCase : List[Any] = num_stages
_UpperCamelCase : str = hidden_sizes
_UpperCamelCase : Union[str, Any] = depths
_UpperCamelCase : Union[str, Any] = is_training
_UpperCamelCase : Any = use_labels
_UpperCamelCase : Optional[int] = intermediate_size
_UpperCamelCase : Optional[Any] = hidden_act
_UpperCamelCase : List[str] = num_labels
_UpperCamelCase : List[str] = initializer_range
_UpperCamelCase : List[Any] = out_features
_UpperCamelCase : Any = out_indices
_UpperCamelCase : Any = scope
def __SCREAMING_SNAKE_CASE ( self : str ) -> Any:
_UpperCamelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCamelCase : str = None
if self.use_labels:
_UpperCamelCase : Optional[int] = ids_tensor([self.batch_size] , self.num_labels )
_UpperCamelCase : Optional[int] = self.get_config()
return config, pixel_values, labels
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> int:
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__a , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : List[Any] , __a : Union[str, Any] , __a : Any ) -> Union[str, Any]:
_UpperCamelCase : Union[str, Any] = ConvNextVaModel(config=__a )
model.to(__a )
model.eval()
_UpperCamelCase : Union[str, Any] = model(__a )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : List[Any] , __a : Tuple , __a : str ) -> Optional[int]:
_UpperCamelCase : Tuple = ConvNextVaForImageClassification(__a )
model.to(__a )
model.eval()
_UpperCamelCase : Optional[int] = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : List[str] , __a : List[Any] , __a : Tuple ) -> Tuple:
_UpperCamelCase : Union[str, Any] = ConvNextVaBackbone(config=__a )
model.to(__a )
model.eval()
_UpperCamelCase : List[str] = model(__a )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_UpperCamelCase : Dict = None
_UpperCamelCase : str = ConvNextVaBackbone(config=__a )
model.to(__a )
model.eval()
_UpperCamelCase : Union[str, Any] = model(__a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
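        # Sanity of the shape checks above (tester defaults): with image_size=32 and
        # hidden_sizes=[10, 20, 30, 40], "stage2" yields 20 channels at 4x4 (stride 8),
        # while out_features=None falls back to the last stage's 40-channel 1x1 map.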
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
_UpperCamelCase : int = self.prepare_config_and_inputs()
_UpperCamelCase : int = config_and_inputs
_UpperCamelCase : str = {"pixel_values": pixel_values}
return config, inputs_dict
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]:
_UpperCamelCase : List[Any] = self.prepare_config_and_inputs()
_UpperCamelCase : int = config_and_inputs
_UpperCamelCase : Optional[int] = {"pixel_values": pixel_values, "labels": labels}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Tuple = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE__ :List[Any] = (
{"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ :Dict = False
SCREAMING_SNAKE_CASE__ :List[Any] = False
SCREAMING_SNAKE_CASE__ :Dict = False
SCREAMING_SNAKE_CASE__ :Union[str, Any] = False
SCREAMING_SNAKE_CASE__ :Union[str, Any] = False
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
_UpperCamelCase : List[Any] = ConvNextVaModelTester(self )
_UpperCamelCase : Dict = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> int:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
return
@unittest.skip(reason="ConvNextV2 does not use inputs_embeds" )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple:
pass
@unittest.skip(reason="ConvNextV2 does not support input and output embeddings" )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple:
pass
@unittest.skip(reason="ConvNextV2 does not use feedforward chunking" )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
pass
def __SCREAMING_SNAKE_CASE ( self : int ) -> Tuple:
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
_UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_with_labels()
_UpperCamelCase : Any = True
if model_class.__name__ in [
*get_values(__a ),
*get_values(__a ),
]:
continue
_UpperCamelCase : Tuple = model_class(__a )
model.to(__a )
model.train()
_UpperCamelCase : List[str] = self._prepare_for_class(__a , __a , return_labels=__a )
_UpperCamelCase : Optional[Any] = model(**__a ).loss
loss.backward()
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[int]:
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
_UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_with_labels()
_UpperCamelCase : Dict = False
_UpperCamelCase : Optional[int] = True
if (
model_class.__name__
in [*get_values(__a ), *get_values(__a )]
or not model_class.supports_gradient_checkpointing
):
continue
_UpperCamelCase : List[Any] = model_class(__a )
model.to(__a )
model.gradient_checkpointing_enable()
model.train()
_UpperCamelCase : Optional[int] = self._prepare_for_class(__a , __a , return_labels=__a )
_UpperCamelCase : Optional[int] = model(**__a ).loss
loss.backward()
def __SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
_UpperCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : Optional[int] = model_class(__a )
_UpperCamelCase : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase : Optional[Any] = [*signature.parameters.keys()]
_UpperCamelCase : Tuple = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __a )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> int:
_UpperCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
def check_hidden_states_output(__a : Optional[Any] , __a : Optional[Any] , __a : List[Any] ):
_UpperCamelCase : int = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
_UpperCamelCase : List[Any] = model(**self._prepare_for_class(__a , __a ) )
_UpperCamelCase : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_UpperCamelCase : Optional[Any] = self.model_tester.num_stages
self.assertEqual(len(__a ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : Dict = True
check_hidden_states_output(__a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCamelCase : Optional[int] = True
check_hidden_states_output(__a , __a , __a )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
_UpperCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@slow
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any:
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase : Any = ConvNextVaModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def lowercase__ ( ) -> Dict:
"""simple docstring"""
_UpperCamelCase : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]:
return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224" ) if is_vision_available() else None
@slow
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
_UpperCamelCase : Tuple = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224" ).to(__a )
_UpperCamelCase : List[Any] = self.default_image_processor
_UpperCamelCase : Optional[Any] = prepare_img()
_UpperCamelCase : List[Any] = preprocessor(images=__a , return_tensors="pt" ).to(__a )
# forward pass
with torch.no_grad():
_UpperCamelCase : List[str] = model(**__a )
# verify the logits
_UpperCamelCase : Union[str, Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __a )
_UpperCamelCase : Union[str, Any] = torch.tensor([0.99_96, 0.19_66, -0.43_86] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) )
| 720
|
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
lowerCamelCase__ = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
lowerCamelCase__ = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
lowerCamelCase__ = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def lowercase__ ( lowercase_ ,lowercase_ ) -> tuple[str, float]:
"""simple docstring"""
_UpperCamelCase : str = len([g for position, g in enumerate(lowercase_ ) if g == main_target[position]] )
return (item, float(lowercase_ ))
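# e.g. evaluate("helxo", main_target="hello") counts 4 matching positions and
# returns ("helxo", 4.0); scores are normalized later inside `basic`.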
def lowercase__ ( lowercase_ ,lowercase_ ) -> tuple[str, str]:
    """simple docstring"""
    # Cut both parents at the same random point and swap the tails.
    _UpperCamelCase : Tuple = random.randint(0 ,len(lowercase_ ) - 1 )
    _UpperCamelCase : Dict = parent_a[:random_slice] + parent_b[random_slice:]
    _UpperCamelCase : Tuple = parent_b[:random_slice] + parent_a[random_slice:]
    return (child_a, child_b)
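# Worked example (slice value illustrative): with parents "abcdef" / "uvwxyz"
# and random_slice == 2, crossover yields the children "abwxyz" and "uvcdef".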
def lowercase__ ( lowercase_ ,lowercase_ ) -> str:
"""simple docstring"""
_UpperCamelCase : int = list(lowercase_ )
if random.uniform(0 ,1 ) < MUTATION_PROBABILITY:
_UpperCamelCase : int = random.choice(lowercase_ )
return "".join(lowercase_ )
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,) -> list[str]:
"""simple docstring"""
_UpperCamelCase : Optional[Any] = []
# Generate more children proportionally to the fitness score.
_UpperCamelCase : List[str] = int(parent_a[1] * 100 ) + 1
_UpperCamelCase : Union[str, Any] = 10 if child_n >= 10 else child_n
for _ in range(lowercase_ ):
_UpperCamelCase : Dict = population_score[random.randint(0 ,lowercase_ )][0]
_UpperCamelCase, _UpperCamelCase : Dict = crossover(parent_a[0] ,lowercase_ )
# Append new string to the population list.
pop.append(mutate(lowercase_ ,lowercase_ ) )
pop.append(mutate(lowercase_ ,lowercase_ ) )
return pop
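# e.g. a parent whose normalized score is 0.37 requests int(0.37 * 100) + 1 = 38
# children, capped at 10; each loop iteration above appends two mutated children.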
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ = True ) -> tuple[int, int, str]:
"""simple docstring"""
if N_POPULATION < N_SELECTED:
_UpperCamelCase : List[str] = F'''{N_POPULATION} must be bigger than {N_SELECTED}'''
raise ValueError(lowercase_ )
# Verify that the target contains no genes besides the ones inside genes variable.
_UpperCamelCase : int = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
_UpperCamelCase : int = F'''{not_in_genes_list} is not in genes list, evolution cannot converge'''
raise ValueError(lowercase_ )
# Generate random starting population.
_UpperCamelCase : Union[str, Any] = []
for _ in range(lowercase_ ):
population.append("".join([random.choice(lowercase_ ) for i in range(len(lowercase_ ) )] ) )
# Just some logs to know what the algorithms is doing.
_UpperCamelCase, _UpperCamelCase : str = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(lowercase_ )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
_UpperCamelCase : int = [evaluate(lowercase_ ,lowercase_ ) for item in population]
# Check if there is a matching evolution.
_UpperCamelCase : Optional[Any] = sorted(lowercase_ ,key=lambda lowercase_ : x[1] ,reverse=lowercase_ )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F'''\nGeneration: {generation}'''
F'''\nTotal Population:{total_population}'''
F'''\nBest score: {population_score[0][1]}'''
F'''\nBest string: {population_score[0][0]}''' )
        # Flush the old population, keeping some of the best evolutions.
        # Keeping these avoids regression of the evolution.
_UpperCamelCase : str = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(lowercase_ )
# Normalize population score to be between 0 and 1.
_UpperCamelCase : str = [
(item, score / len(lowercase_ )) for item, score in population_score
]
        # This is the selection step.
for i in range(lowercase_ ):
population.extend(select(population_score[int(lowercase_ )] ,lowercase_ ,lowercase_ ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
        # far fewer generations.
if len(lowercase_ ) > N_POPULATION:
break
if __name__ == "__main__":
lowerCamelCase__ = (
"This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
)
lowerCamelCase__ = list(
" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
"nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
)
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = basic(target_str, genes_list)
print(
f"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"""
)
| 51
| 0
|
"""simple docstring"""
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def lowercase__ ( lowercase_ ) -> Optional[Any]:
"""simple docstring"""
if not is_accelerate_available():
return method
_UpperCamelCase : str = version.parse(accelerate.__version__ ).base_version
if version.parse(lowercase_ ) < version.parse("0.17.0" ):
return method
def wrapper(self ,*lowercase_ ,**lowercase_ ):
if hasattr(self ,"_hf_hook" ) and hasattr(self._hf_hook ,"pre_forward" ):
self._hf_hook.pre_forward(self )
return method(self ,*lowercase_ ,**lowercase_ )
return wrapper
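# Usage sketch (hypothetical module; in diffusers this decorator is known as
# `apply_forward_hook`), assuming accelerate >= 0.17.0 is installed:
#
#   class Encoder(torch.nn.Module):
#       @apply_forward_hook
#       def encode(self, x):
#           return x * 2
#
# The wrapper then triggers `self._hf_hook.pre_forward(self)` (e.g. to move
# offloaded weights onto the execution device) before `encode` executes.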
| 721
|
"""simple docstring"""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = ["model.decoder.embed_positions.weights"]
def lowercase__ ( lowercase_ ) -> Optional[Any]:
"""simple docstring"""
if "emb" in name:
_UpperCamelCase : List[str] = name.replace("emb" ,"model.decoder.embed_tokens" )
if "transformer" in name:
_UpperCamelCase : Optional[int] = name.replace("transformer" ,"model.decoder" )
if "cross_attention" in name:
_UpperCamelCase : Optional[int] = name.replace("cross_attention" ,"encoder_attn" )
if "linear1" in name:
_UpperCamelCase : Optional[Any] = name.replace("linear1" ,"fc1" )
if "linear2" in name:
_UpperCamelCase : Union[str, Any] = name.replace("linear2" ,"fc2" )
if "norm1" in name:
_UpperCamelCase : Optional[Any] = name.replace("norm1" ,"self_attn_layer_norm" )
if "norm_cross" in name:
_UpperCamelCase : Dict = name.replace("norm_cross" ,"encoder_attn_layer_norm" )
if "norm2" in name:
_UpperCamelCase : Union[str, Any] = name.replace("norm2" ,"final_layer_norm" )
if "out_norm" in name:
_UpperCamelCase : Union[str, Any] = name.replace("out_norm" ,"model.decoder.layer_norm" )
if "linears" in name:
_UpperCamelCase : List[str] = name.replace("linears" ,"lm_heads" )
if "condition_provider.conditioners.description.output_proj" in name:
_UpperCamelCase : Any = name.replace("condition_provider.conditioners.description.output_proj" ,"enc_to_dec_proj" )
return name
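# Example of the substitutions above (key illustrative):
#   "transformer.layers.0.linear1.weight" -> "model.decoder.layers.0.fc1.weight"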
def lowercase__ ( lowercase_ ,lowercase_ ) -> Tuple[Dict, Dict]:
"""simple docstring"""
_UpperCamelCase : str = list(state_dict.keys() )
_UpperCamelCase : Optional[Any] = {}
for key in keys:
_UpperCamelCase : Optional[int] = state_dict.pop(lowercase_ )
_UpperCamelCase : List[Any] = rename_keys(lowercase_ )
if "in_proj_weight" in key:
# split fused qkv proj
_UpperCamelCase : Tuple = val[:hidden_size, :]
_UpperCamelCase : Optional[Any] = val[hidden_size : 2 * hidden_size, :]
_UpperCamelCase : Optional[Any] = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
_UpperCamelCase : Optional[Any] = val
else:
_UpperCamelCase : List[str] = val
return state_dict, enc_dec_proj_state_dict
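# The returned pair separates plain decoder weights from the enc-to-dec
# projection: keys containing "enc_to_dec_proj" land in the second dict, while
# everything else (with fused in_proj tensors split into q/k/v) stays in the first.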
def lowercase__ ( lowercase_ ) -> MusicgenDecoderConfig:
"""simple docstring"""
if checkpoint == "small":
# default config values
_UpperCamelCase : List[Any] = 1_024
_UpperCamelCase : List[str] = 24
_UpperCamelCase : Any = 16
elif checkpoint == "medium":
_UpperCamelCase : Tuple = 1_536
_UpperCamelCase : Dict = 48
_UpperCamelCase : Tuple = 24
elif checkpoint == "large":
_UpperCamelCase : int = 2_048
_UpperCamelCase : Optional[int] = 48
_UpperCamelCase : Dict = 32
else:
raise ValueError(F'''Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.''' )
_UpperCamelCase : str = MusicgenDecoderConfig(
hidden_size=lowercase_ ,ffn_dim=hidden_size * 4 ,num_hidden_layers=lowercase_ ,num_attention_heads=lowercase_ ,)
return config
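# Summary of the branches above, as (hidden_size, num_hidden_layers, num_attention_heads):
#   small -> (1024, 24, 16), medium -> (1536, 48, 24), large -> (2048, 48, 32);
# ffn_dim is always 4 * hidden_size.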
@torch.no_grad()
def lowercase__ ( lowercase_ ,lowercase_=None ,lowercase_=None ,lowercase_="cpu" ) -> List[str]:
"""simple docstring"""
_UpperCamelCase : str = MusicGen.get_pretrained(lowercase_ ,device=lowercase_ )
_UpperCamelCase : Union[str, Any] = decoder_config_from_checkpoint(lowercase_ )
_UpperCamelCase : Optional[int] = fairseq_model.lm.state_dict()
_UpperCamelCase, _UpperCamelCase : Optional[Any] = rename_state_dict(
lowercase_ ,hidden_size=decoder_config.hidden_size )
_UpperCamelCase : Tuple = TaEncoderModel.from_pretrained("t5-base" )
_UpperCamelCase : Union[str, Any] = EncodecModel.from_pretrained("facebook/encodec_32khz" )
_UpperCamelCase : str = MusicgenForCausalLM(lowercase_ ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
_UpperCamelCase, _UpperCamelCase : str = decoder.load_state_dict(lowercase_ ,strict=lowercase_ )
for key in missing_keys.copy():
if key.startswith(("text_encoder", "audio_encoder") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(lowercase_ )
if len(lowercase_ ) > 0:
raise ValueError(F'''Missing key(s) in state_dict: {missing_keys}''' )
if len(lowercase_ ) > 0:
raise ValueError(F'''Unexpected key(s) in state_dict: {unexpected_keys}''' )
# init the composite model
_UpperCamelCase : str = MusicgenForConditionalGeneration(text_encoder=lowercase_ ,audio_encoder=lowercase_ ,decoder=lowercase_ )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(lowercase_ )
# check we can do a forward pass
_UpperCamelCase : List[str] = torch.arange(0 ,8 ,dtype=torch.long ).reshape(2 ,-1 )
_UpperCamelCase : Dict = input_ids.reshape(2 * 4 ,-1 )
with torch.no_grad():
_UpperCamelCase : Tuple = model(input_ids=lowercase_ ,decoder_input_ids=lowercase_ ).logits
if logits.shape != (8, 1, 2_048):
raise ValueError("Incorrect shape for logits" )
# now construct the processor
_UpperCamelCase : int = AutoTokenizer.from_pretrained("t5-base" )
_UpperCamelCase : str = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz" ,padding_side="left" )
_UpperCamelCase : Optional[int] = MusicgenProcessor(feature_extractor=lowercase_ ,tokenizer=lowercase_ )
# set the appropriate bos/pad token ids
_UpperCamelCase : str = 2_048
_UpperCamelCase : str = 2_048
# set other default generation config params
_UpperCamelCase : Optional[Any] = int(30 * audio_encoder.config.frame_rate )
_UpperCamelCase : List[str] = True
_UpperCamelCase : int = 3.0
if pytorch_dump_folder is not None:
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
logger.info(F'''Saving model {checkpoint} to {pytorch_dump_folder}''' )
model.save_pretrained(lowercase_ )
processor.save_pretrained(lowercase_ )
if repo_id:
logger.info(F'''Pushing model {checkpoint} to {repo_id}''' )
model.push_to_hub(lowercase_ )
processor.push_to_hub(lowercase_ )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
lowerCamelCase__ = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 51
| 0
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __SCREAMING_SNAKE_CASE ( metaclass=_UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :int = ["torch", "transformers", "onnx"]
def __init__( self : Optional[int] , *__a : List[str] , **__a : str ) -> int:
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : Optional[int] , *__a : Dict , **__a : Dict ) -> Optional[Any]:
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : Any , *__a : Any , **__a : List[str] ) -> List[Any]:
requires_backends(cls , ["torch", "transformers", "onnx"] )
class __SCREAMING_SNAKE_CASE ( metaclass=_UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :str = ["torch", "transformers", "onnx"]
def __init__( self : Tuple , *__a : List[str] , **__a : List[str] ) -> Union[str, Any]:
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : List[Any] , *__a : List[Any] , **__a : Any ) -> List[Any]:
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : Optional[int] , *__a : str , **__a : Any ) -> Union[str, Any]:
requires_backends(cls , ["torch", "transformers", "onnx"] )
class __SCREAMING_SNAKE_CASE ( metaclass=_UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Tuple = ["torch", "transformers", "onnx"]
def __init__( self : Optional[Any] , *__a : int , **__a : int ) -> int:
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : Optional[Any] , *__a : Tuple , **__a : Union[str, Any] ) -> int:
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : Dict , *__a : Union[str, Any] , **__a : List[str] ) -> Optional[int]:
requires_backends(cls , ["torch", "transformers", "onnx"] )
class __SCREAMING_SNAKE_CASE ( metaclass=_UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Optional[int] = ["torch", "transformers", "onnx"]
def __init__( self : Union[str, Any] , *__a : List[Any] , **__a : Optional[Any] ) -> Optional[Any]:
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : List[Any] , *__a : Tuple , **__a : Optional[Any] ) -> str:
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : str , *__a : Optional[Any] , **__a : str ) -> Union[str, Any]:
requires_backends(cls , ["torch", "transformers", "onnx"] )
class __SCREAMING_SNAKE_CASE ( metaclass=_UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :int = ["torch", "transformers", "onnx"]
def __init__( self : Optional[int] , *__a : Optional[int] , **__a : Dict ) -> Tuple:
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : Tuple , *__a : Optional[Any] , **__a : List[str] ) -> List[str]:
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : Optional[Any] , *__a : Optional[Any] , **__a : Tuple ) -> Optional[int]:
requires_backends(cls , ["torch", "transformers", "onnx"] )
class __SCREAMING_SNAKE_CASE ( metaclass=_UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :int = ["torch", "transformers", "onnx"]
def __init__( self : str , *__a : List[str] , **__a : Union[str, Any] ) -> List[Any]:
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : Union[str, Any] , *__a : Optional[Any] , **__a : List[Any] ) -> Tuple:
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : Optional[int] , *__a : Optional[Any] , **__a : Optional[int] ) -> str:
requires_backends(cls , ["torch", "transformers", "onnx"] )
| 700
|
"""simple docstring"""
from datetime import datetime
import requests
from bs4 import BeautifulSoup
if __name__ == "__main__":
lowerCamelCase__ = input("Enter image url: ").strip()
print(f"""Downloading image from {url} ...""")
lowerCamelCase__ = BeautifulSoup(requests.get(url).content, "html.parser")
# The image URL is in the content field of the first meta tag with property og:image
lowerCamelCase__ = soup.find("meta", {"property": "og:image"})["content"]
lowerCamelCase__ = requests.get(image_url).content
lowerCamelCase__ = f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"""
with open(file_name, "wb") as fp:
fp.write(image_data)
print(f"""Done. Image saved to disk as {file_name}.""")
| 51
| 0
|
"""simple docstring"""
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Dict , __a : Dict , __a : str=13 , __a : List[str]=7 , __a : str=6 , __a : str=17 , __a : Optional[Any]=23 , __a : Optional[int]=11 , __a : Optional[int]=True , ) -> List[Any]:
_UpperCamelCase : Union[str, Any] = parent
_UpperCamelCase : Dict = batch_size
_UpperCamelCase : Dict = seq_length
_UpperCamelCase : Dict = act_dim
_UpperCamelCase : str = state_dim
_UpperCamelCase : Union[str, Any] = hidden_size
_UpperCamelCase : Optional[Any] = max_length
_UpperCamelCase : str = is_training
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]:
_UpperCamelCase : List[str] = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
_UpperCamelCase : Union[str, Any] = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
_UpperCamelCase : str = floats_tensor((self.batch_size, self.seq_length, 1) )
_UpperCamelCase : str = floats_tensor((self.batch_size, self.seq_length, 1) )
_UpperCamelCase : Any = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1000 )
_UpperCamelCase : str = random_attention_mask((self.batch_size, self.seq_length) )
_UpperCamelCase : List[Any] = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
def __SCREAMING_SNAKE_CASE ( self : int , __a : List[str] , __a : Optional[int] , __a : str , __a : Union[str, Any] , __a : Optional[int] , __a : str , __a : Tuple , ) -> Tuple:
_UpperCamelCase : Any = DecisionTransformerModel(config=__a )
model.to(__a )
model.eval()
_UpperCamelCase : Tuple = model(__a , __a , __a , __a , __a , __a )
self.parent.assertEqual(result.state_preds.shape , states.shape )
self.parent.assertEqual(result.action_preds.shape , actions.shape )
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) )  # seq_length * 3, as there are 3 modalities: states, returns and actions
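        # e.g. with seq_length = 7, the tokens interleave as (return_1, state_1,
        # action_1, ..., return_7, state_7, action_7), a 21-step hidden sequence.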
def __SCREAMING_SNAKE_CASE ( self : int ) -> str:
_UpperCamelCase : Any = self.prepare_config_and_inputs()
        _UpperCamelCase : Union[str, Any] = config_and_inputs
_UpperCamelCase : Tuple = {
"states": states,
"actions": actions,
"rewards": rewards,
"returns_to_go": returns_to_go,
"timesteps": timesteps,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_torch
class DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}

    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use input_ids
    test_generate_without_input_ids = False

    # Ignoring of failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False
    def setUp(self):
        self.model_tester = DecisionTransformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=37)
    def test_config(self):
self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "states",
                "actions",
                "rewards",
                "returns_to_go",
                "timesteps",
                "attention_mask",
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
@require_torch
class DecisionTransformerModelIntegrationTest(unittest.TestCase):
@slow
    def test_autoregressive_prediction(self):
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert")
        model = model.to(torch_device)
        config = model.config
        torch.manual_seed(0)
        state = torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32)  # env.reset()

        expected_outputs = torch.tensor(
            [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]], device=torch_device
        )

        returns_to_go = torch.tensor(TARGET_RETURN, device=torch_device, dtype=torch.float32).reshape(1, 1, 1)
        states = state
        actions = torch.zeros(1, 0, config.act_dim, device=torch_device, dtype=torch.float32)
        rewards = torch.zeros(1, 0, device=torch_device, dtype=torch.float32)
        timesteps = torch.tensor(0, device=torch_device, dtype=torch.long).reshape(1, 1)

        for step in range(NUM_STEPS):
            actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=torch_device)], dim=1)
            rewards = torch.cat([rewards, torch.zeros(1, 1, device=torch_device)], dim=1)

            attention_mask = torch.ones(1, states.shape[1]).to(dtype=torch.long, device=states.device)

            with torch.no_grad():
                _, action_pred, _ = model(
                    states=states,
                    actions=actions,
                    rewards=rewards,
                    returns_to_go=returns_to_go,
                    timesteps=timesteps,
                    attention_mask=attention_mask,
                    return_dict=False,
                )
            self.assertEqual(action_pred.shape, actions.shape)
            self.assertTrue(torch.allclose(action_pred[0, -1], expected_outputs[step], atol=1e-4))
            state, reward, done, _ = (  # env.step(action)
                torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32),
                1.0,
                False,
                {},
            )

            actions[-1] = action_pred[0, -1]
            states = torch.cat([states, state], dim=1)
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1)
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1), device=torch_device, dtype=torch.long) * (step + 1)], dim=1
            )
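        # The loop above mimics a gym-style rollout: each step appends an empty action/reward slot,
        # asks the model for the next action, then extends states, returns-to-go and timesteps with
        # the (here randomly faked) environment transition before the next prediction.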
"""simple docstring"""
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(".")
def get_module_path(test_file):
    """Return the importable module path of a model test file."""
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )

    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)
    return test_module_path


def get_test_module(test_file):
    """Import and return the test module corresponding to a model test file."""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module


def get_tester_classes(test_file):
    """Get all classes in a model test file whose names end with `ModelTester`."""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    """Get all test classes in a model test file that have a non-empty `all_model_classes`."""
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)

    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    """Get all model classes that appear in `all_model_classes` of the test classes in `test_file`."""
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)

    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    """Get the model tester class of a model test class."""
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()

    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester


def get_test_classes_for_model(test_file, model_class):
    """Get all test classes in `test_file` that have `model_class` in their `all_model_classes`."""
    test_classes = get_test_classes(test_file)

    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)

    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    """Get all model tester classes in `test_file` that are associated to `model_class`."""
    test_classes = get_test_classes_for_model(test_file, model_class)

    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    """Get a mapping from test classes to their model tester classes."""
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    """Get a mapping from model classes to the test classes that exercise them."""
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    """Get a mapping from model classes to their model tester classes."""
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    """Make the class information succinct and JSON-friendly (classes become their names)."""
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
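# Illustrative usage (run from the repo root; the BERT test path is just an example):
#   bert_test_file = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
#   print(to_json(get_model_to_test_mapping(bert_test_file)))
#   print(to_json(get_model_to_tester_mapping(bert_test_file)))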
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape(tensor_list):
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])
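# Illustrative check: tensors of equal shape pass, e.g.
#   check_same_shape([torch.zeros(2, 3), torch.ones(2, 3)])  # -> True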
class StableDiffusionLatentUpscalePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "height",
        "width",
        "cross_attention_kwargs",
        "negative_prompt_embeds",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    test_cpu_offload = True
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    def get_dummy_components(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            act_fn="gelu",
            attention_head_dim=8,
            norm_num_groups=None,
            block_out_channels=[32, 32, 64, 64],
            time_cond_proj_dim=160,
            conv_in_kernel=1,
            conv_out_kernel=1,
            cross_attention_dim=32,
            down_block_types=(
                "KDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
            ),
            in_channels=8,
            mid_block_type=None,
            only_cross_attention=False,
            out_channels=5,
            resnet_time_scale_shift="scale_shift",
            time_embedding_type="fourier",
            timestep_post_act="gelu",
            up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"),
        )
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=[
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
            ],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        scheduler = EulerDiscreteScheduler(prediction_type="sample")
        text_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="quick_gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": model.eval(),
            "vae": vae.eval(),
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": self.dummy_image.cpu(),
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 256, 256, 3))
        expected_slice = np.array(
            [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3)
    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3)
    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=7e-3)
    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3)
    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)
    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)
    def test_karras_schedulers_shape(self):
        skip_schedulers = [
            "DDIMScheduler",
            "DDPMScheduler",
            "PNDMScheduler",
            "HeunDiscreteScheduler",
            "EulerAncestralDiscreteScheduler",
            "KDPM2DiscreteScheduler",
            "KDPM2AncestralDiscreteScheduler",
            "DPMSolverSDEScheduler",
        ]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)

        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = 2

        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # no-sigma schedulers are not supported
                continue

            scheduler_cls = getattr(diffusers, scheduler_enum.name)
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
            output = pipe(**inputs)[0]
            outputs.append(output)

        assert check_same_shape(outputs)
@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_latent_upscaler_fp16(self):
        generator = torch.manual_seed(33)

        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
        pipe.to("cuda")

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"

        low_res_latents = pipe(prompt, generator=generator, output_type="latent").images

        image = upscaler(
            prompt=prompt,
            image=low_res_latents,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy"
        )
        assert np.abs((expected_image - image).mean()) < 5e-2
    def test_latent_upscaler_fp16_image(self):
        generator = torch.manual_seed(33)

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png"
        )

        image = upscaler(
            prompt=prompt,
            image=image,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy"
        )
        assert np.abs((expected_image - image).max()) < 5e-2
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_mask2former": [
        "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Mask2FormerConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mask2former"] = [
        "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Mask2FormerForUniversalSegmentation",
        "Mask2FormerModel",
        "Mask2FormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_mask2former import Mask2FormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
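# Sketch of the intended lazy-import behaviour: with `sys.modules[__name__]` swapped for the
# `_LazyModule`, `from transformers.models.mask2former import Mask2FormerConfig` only imports
# the configuration submodule; the torch-only modeling classes are loaded on first access.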
"""simple docstring"""
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image
def rename_key(key):
    if "visual_encoder" in key:
        key = re.sub("visual_encoder*", "vision_model.encoder", key)
    if "blocks" in key:
        key = re.sub(r"blocks", "layers", key)
    if "attn" in key:
        key = re.sub(r"attn", "self_attn", key)
    if "norm1" in key:
        key = re.sub(r"norm1", "layer_norm1", key)
    if "norm2" in key:
        key = re.sub(r"norm2", "layer_norm2", key)
    if "encoder.norm" in key:
        key = re.sub(r"encoder.norm", "post_layernorm", key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key)
    if "encoder.pos_embed" in key:
        key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key)
    if "encoder.cls_token" in key:
        key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key)
    if "self_attn" in key:
        key = re.sub(r"self_attn.proj", "self_attn.projection", key)
    return key
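# Illustrative key translation performed by rename_key():
#   "visual_encoder.blocks.0.attn.proj.weight"
#   -> "vision_model.encoder.layers.0.self_attn.projection.weight"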
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})

    hf_model = BlipForConditionalGeneration(config).eval()

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"

    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit="base")
    pt_model = pt_model.eval()

    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_model.load_state_dict(modified_state_dict)

    image_size = 384
    image = load_demo_image(image_size=image_size, device="cpu")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    input_ids = tokenizer(["a picture of"]).input_ids

    out = hf_model.generate(image, input_ids)
    assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    out = hf_model.generate(image)
    assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)

    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
    )

    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit="base")
    vqa_model.eval()

    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_vqa_model = BlipForQuestionAnswering(config)
    hf_vqa_model.load_state_dict(modified_state_dict)

    question = ["How many dogs are in this image?"]
    question_input_ids = tokenizer(question, return_tensors="pt").input_ids

    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))

    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa")

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"

    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit="base")
    itm_model.eval()

    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_itm_model = BlipForImageTextRetrieval(config)

    question = ["A picture of a woman with a dog sitting in a beach"]
    question_input_ids = tokenizer(
        question,
        return_tensors="pt",
        padding="max_length",
        truncation=True,
        max_length=35,
    ).input_ids

    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()

    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)

    assert out[0].item() == 0.21106874942779541
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127

    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm")
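# Illustrative invocation (the script name and paths are hypothetical):
#   python convert_blip_checkpoints.py --pytorch_dump_folder_path ./blip-base --config_path ./blip_config.json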
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
"""simple docstring"""
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DistilBertModel,
            "fill-mask": DistilBertForMaskedLM,
            "question-answering": DistilBertForQuestionAnswering,
            "text-classification": DistilBertForSequenceClassification,
            "token-classification": DistilBertForTokenClassification,
            "zero-shot": DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True
    def setUp(self):
        self.model_tester = DistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)
    def test_config(self):
self.config_tester.run_common_tests()
    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@slow
@require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class DistilBertModelIntegrationTest(unittest.TestCase):
@slow
    def test_inference_no_head_absolute_embedding(self):
        model = DistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
"""simple docstring"""
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class RealmRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5

        # Realm tok
        vocab_tokens = [
_UpperCamelCase : Tuple = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"test",
"question",
"this",
"is",
"the",
"first",
"second",
"third",
"fourth",
"fifth",
"record",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
        realm_tokenizer_path = os.path.join(self.tmpdirname, "realm_tokenizer")
        os.makedirs(realm_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(realm_tokenizer_path, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        realm_block_records_path = os.path.join(self.tmpdirname, "realm_block_records")
        os.makedirs(realm_block_records_path, exist_ok=True)
    def get_tokenizer(self) -> RealmTokenizer:
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , "realm_tokenizer" ) )
    def tearDown(self):
shutil.rmtree(self.tmpdirname )
    def get_config(self):
        config = RealmConfig(num_block_records=self.num_block_records)
        return config
    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
{
"id": ["0", "1"],
"question": ["foo", "bar"],
"answers": [["Foo", "Bar"], ["Bar"]],
} )
return dataset
    def get_dummy_block_records(self):
        block_records = np.array(
[
b"This is the first record",
b"This is the second record",
b"This is the third record",
b"This is the fourth record",
b"This is the fifth record",
b"This is a longer longer longer record",
            ],
            dtype=object,
        )
return block_records
    def get_dummy_retriever(self):
        retriever = RealmRetriever(
            block_records=self.get_dummy_block_records(),
            tokenizer=self.get_tokenizer(),
        )
return retriever
    def test_retrieve(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth"], add_special_tokens=False, return_token_type_ids=False, return_attention_mask=False
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )
        self.assertEqual(len(has_answers), 2)
        self.assertEqual(len(start_pos), 2)
        self.assertEqual(len(end_pos), 2)
self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"] , )
    def test_block_has_answer(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3, 5], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth", "longer longer"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, _ = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )

        self.assertEqual([False, True, True], has_answers)
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]], start_pos)
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]], end_pos)
    def test_save_load_pretrained(self):
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))

        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))
        self.assertEqual(retriever.block_records[0], b"This is the first record")

        # Test mocked remote path
        with patch("transformers.models.realm.retrieval_realm.hf_hub_download") as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname, "realm_block_records"), _REALM_BLOCK_RECORDS_FILENAME
            )
            retriever = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa")

        self.assertEqual(retriever.block_records[0], b"This is the first record")
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
lowerCamelCase__ = logging.get_logger(__name__)
class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
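# Migration note: instantiating FlavaFeatureExtractor still works but emits a FutureWarning;
# new code should construct FlavaImageProcessor (e.g. `FlavaImageProcessor.from_pretrained(...)`) directly.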
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        attention_window=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window

        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2

        # because of padding `encoder_seq_length` is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            attention_window=self.attention_window,
            **self.config_updates,
        )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]],
            axis=-1,
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
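# Note on the defaults above: when no masks are passed, padding positions (config.pad_token_id)
# are masked out of `attention_mask`/`decoder_attention_mask`, and full head masks of ones are used.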
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
        num_global_attn_indices = 2
        inputs_dict["global_attention_mask"] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices,
            1,
            inputs_dict["global_attention_mask"],
        )

        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["use_cache"] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
    @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing.")
    def test_saved_model_creation(self):
        pass

    def test_generate_with_headmasking(self):
        # TODO: head-masking not yet implemented
        pass
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


TOLERANCE = 1e-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led

        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)
    def test_inference_with_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")

        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
| 51
| 0
|
"""simple docstring"""
import os
def lowercase__ ( ) -> List[str]:
"""simple docstring"""
    with open(os.path.dirname(__file__ ) + "/p022_names.txt" ) as file:
_UpperCamelCase : Optional[Any] = str(file.readlines()[0] )
_UpperCamelCase : Any = names.replace("\"" ,"" ).split("," )
names.sort()
_UpperCamelCase : List[str] = 0
_UpperCamelCase : Union[str, Any] = 0
    for i, name in enumerate(names ):
for letter in name:
            name_score += ord(letter ) - 64
total_score += (i + 1) * name_score
_UpperCamelCase : List[Any] = 0
return total_score
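# Illustrative check (worked example, assuming the standard Project Euler name
# list): "COLIN" scores 3 + 15 + 12 + 9 + 14 = 53, and at position 938 in the
# sorted list it would contribute 938 * 53 = 49714 to the total.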
if __name__ == "__main__":
print(solution())
| 706
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Union[str, Any] = RoCBertTokenizer
SCREAMING_SNAKE_CASE__ :Dict = None
SCREAMING_SNAKE_CASE__ :List[Any] = False
SCREAMING_SNAKE_CASE__ :Union[str, Any] = True
SCREAMING_SNAKE_CASE__ :Union[str, Any] = filter_non_english
def __SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
super().setUp()
_UpperCamelCase : Any = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
_UpperCamelCase : List[str] = {}
_UpperCamelCase : Tuple = {}
        for i, value in enumerate(vocab_tokens ):
_UpperCamelCase : List[str] = i
_UpperCamelCase : Optional[Any] = i
_UpperCamelCase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_UpperCamelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_shape_file"] )
_UpperCamelCase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_pronunciation_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
with open(self.word_shape_file , "w" , encoding="utf-8" ) as word_shape_writer:
json.dump(__a , __a , ensure_ascii=__a )
with open(self.word_pronunciation_file , "w" , encoding="utf-8" ) as word_pronunciation_writer:
json.dump(__a , __a , ensure_ascii=__a )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
_UpperCamelCase : Tuple = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
_UpperCamelCase : int = tokenizer.tokenize("你好[SEP]你是谁" )
self.assertListEqual(__a , ["你", "好", "[SEP]", "你", "是", "谁"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(__a ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(__a ) , [5, 6, 2, 5, 7, 8] )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
_UpperCamelCase : Dict = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
_UpperCamelCase : List[Any] = RoCBertBasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
_UpperCamelCase : Optional[Any] = RoCBertBasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
_UpperCamelCase : Dict = RoCBertBasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
_UpperCamelCase : List[str] = RoCBertBasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
_UpperCamelCase : Tuple = RoCBertBasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
_UpperCamelCase : Union[str, Any] = RoCBertBasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
_UpperCamelCase : Tuple = RoCBertBasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
_UpperCamelCase : int = RoCBertBasicTokenizer(do_lower_case=__a , never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
_UpperCamelCase : Optional[int] = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
_UpperCamelCase : Any = {}
for i, token in enumerate(__a ):
_UpperCamelCase : str = i
_UpperCamelCase : Optional[int] = RoCBertWordpieceTokenizer(vocab=__a , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
_UpperCamelCase : Optional[Any] = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
if self.test_rust_tokenizer:
_UpperCamelCase : Tuple = self.get_rust_tokenizer()
self.assertListEqual(
            [rust_tokenizer.tokenize(t ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_UpperCamelCase : int = self.rust_tokenizer_class.from_pretrained(__a , **__a )
_UpperCamelCase : Union[str, Any] = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
_UpperCamelCase : Optional[Any] = tokenizer_r.encode_plus(
__a , return_attention_mask=__a , return_token_type_ids=__a , return_offsets_mapping=__a , add_special_tokens=__a , )
_UpperCamelCase : List[Any] = tokenizer_r.do_lower_case if hasattr(__a , "do_lower_case" ) else False
_UpperCamelCase : Dict = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
_UpperCamelCase : Optional[Any] = ["的", "人", "有"]
_UpperCamelCase : int = "".join(__a )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_UpperCamelCase : int = True
_UpperCamelCase : Any = self.tokenizer_class.from_pretrained(__a , **__a )
_UpperCamelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(__a , **__a )
_UpperCamelCase : int = tokenizer_p.encode(__a , add_special_tokens=__a )
_UpperCamelCase : int = tokenizer_r.encode(__a , add_special_tokens=__a )
_UpperCamelCase : List[Any] = tokenizer_r.convert_ids_to_tokens(__a )
_UpperCamelCase : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(__a )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(__a , __a )
self.assertListEqual(__a , __a )
_UpperCamelCase : Any = False
_UpperCamelCase : Dict = self.rust_tokenizer_class.from_pretrained(__a , **__a )
_UpperCamelCase : Any = self.tokenizer_class.from_pretrained(__a , **__a )
_UpperCamelCase : Any = tokenizer_r.encode(__a , add_special_tokens=__a )
_UpperCamelCase : Any = tokenizer_p.encode(__a , add_special_tokens=__a )
_UpperCamelCase : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(__a )
_UpperCamelCase : Dict = tokenizer_p.convert_ids_to_tokens(__a )
# it is expected that only the first Chinese character is not preceded by "##".
_UpperCamelCase : Any = [
F'''##{token}''' if idx != 0 else token for idx, token in enumerate(__a )
]
self.assertListEqual(__a , __a )
self.assertListEqual(__a , __a )
@slow
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
_UpperCamelCase : Dict = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
_UpperCamelCase : Optional[int] = tokenizer.encode("你好" , add_special_tokens=__a )
_UpperCamelCase : Dict = tokenizer.encode("你是谁" , add_special_tokens=__a )
_UpperCamelCase : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(__a )
_UpperCamelCase : Tuple = tokenizer.build_inputs_with_special_tokens(__a , __a )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
_UpperCamelCase : Optional[Any] = self.get_tokenizers(do_lower_case=__a )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_UpperCamelCase : int = "你好,你是谁"
_UpperCamelCase : Any = tokenizer.tokenize(__a )
_UpperCamelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(__a )
_UpperCamelCase : List[str] = tokenizer.convert_tokens_to_shape_ids(__a )
_UpperCamelCase : Any = tokenizer.convert_tokens_to_pronunciation_ids(__a )
_UpperCamelCase : Optional[int] = tokenizer.prepare_for_model(
__a , __a , __a , add_special_tokens=__a )
_UpperCamelCase : Tuple = tokenizer.encode_plus(__a , add_special_tokens=__a )
self.assertEqual(__a , __a )
| 51
| 0
|
"""simple docstring"""
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
lowerCamelCase__ = "src/transformers"
lowerCamelCase__ = "docs/source/en"
lowerCamelCase__ = "."
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> List[str]:
"""simple docstring"""
with open(lowercase_ ,"r" ,encoding="utf-8" ,newline="\n" ) as f:
_UpperCamelCase : Union[str, Any] = f.readlines()
# Find the start prompt.
_UpperCamelCase : Dict = 0
while not lines[start_index].startswith(lowercase_ ):
start_index += 1
start_index += 1
_UpperCamelCase : Optional[int] = start_index
while not lines[end_index].startswith(lowercase_ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
lowerCamelCase__ = "Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names.
lowerCamelCase__ = re.compile(R"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
lowerCamelCase__ = re.compile(R"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
lowerCamelCase__ = re.compile(R"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# This is to make sure the transformers module imported is the one in the repo.
lowerCamelCase__ = direct_transformers_import(TRANSFORMERS_PATH)
def lowercase__ ( lowercase_ ) -> Any:
"""simple docstring"""
_UpperCamelCase : Tuple = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)" ,lowercase_ )
return [m.group(0 ) for m in matches]
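# Behavior sketch (illustrative): the regex splits on lower->upper and
# acronym->word boundaries, e.g. camel_case_split("TFBertModel") -> ["TF", "Bert", "Model"].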
def lowercase__ ( lowercase_ ,lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase : str = 2 if text == "✅" or text == "❌" else len(lowercase_ )
_UpperCamelCase : Union[str, Any] = (width - text_length) // 2
_UpperCamelCase : Dict = width - text_length - left_indent
return " " * left_indent + text + " " * right_indent
def lowercase__ ( ) -> str:
"""simple docstring"""
_UpperCamelCase : Optional[Any] = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
_UpperCamelCase : str = {
name: config_maping_names[code]
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if code in config_maping_names
}
_UpperCamelCase : Dict = {name: config.replace("Config" ,"" ) for name, config in model_name_to_config.items()}
# Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
_UpperCamelCase : int = collections.defaultdict(lowercase_ )
_UpperCamelCase : Dict = collections.defaultdict(lowercase_ )
_UpperCamelCase : Dict = collections.defaultdict(lowercase_ )
_UpperCamelCase : int = collections.defaultdict(lowercase_ )
_UpperCamelCase : str = collections.defaultdict(lowercase_ )
# Let's lookup through all transformers object (once).
for attr_name in dir(lowercase_ ):
_UpperCamelCase : List[str] = None
if attr_name.endswith("Tokenizer" ):
_UpperCamelCase : Tuple = slow_tokenizers
_UpperCamelCase : Any = attr_name[:-9]
elif attr_name.endswith("TokenizerFast" ):
_UpperCamelCase : Optional[Any] = fast_tokenizers
_UpperCamelCase : List[str] = attr_name[:-13]
elif _re_tf_models.match(lowercase_ ) is not None:
_UpperCamelCase : List[Any] = tf_models
_UpperCamelCase : Dict = _re_tf_models.match(lowercase_ ).groups()[0]
elif _re_flax_models.match(lowercase_ ) is not None:
_UpperCamelCase : Dict = flax_models
_UpperCamelCase : Union[str, Any] = _re_flax_models.match(lowercase_ ).groups()[0]
elif _re_pt_models.match(lowercase_ ) is not None:
_UpperCamelCase : Optional[int] = pt_models
_UpperCamelCase : Any = _re_pt_models.match(lowercase_ ).groups()[0]
if lookup_dict is not None:
while len(lowercase_ ) > 0:
if attr_name in model_name_to_prefix.values():
_UpperCamelCase : Dict = True
break
# Try again after removing the last word in the name
_UpperCamelCase : List[str] = "".join(camel_case_split(lowercase_ )[:-1] )
# Let's build that table!
_UpperCamelCase : Any = list(model_name_to_config.keys() )
model_names.sort(key=str.lower )
_UpperCamelCase : List[str] = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
# We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
_UpperCamelCase : Union[str, Any] = [len(lowercase_ ) + 2 for c in columns]
_UpperCamelCase : Any = max([len(lowercase_ ) for name in model_names] ) + 2
# Build the table per se
_UpperCamelCase : Tuple = "|" + "|".join([_center_text(lowercase_ ,lowercase_ ) for c, w in zip(lowercase_ ,lowercase_ )] ) + "|\n"
# Use ":-----:" format to center-aligned table cell texts
table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths] ) + "|\n"
_UpperCamelCase : Union[str, Any] = {True: "✅", False: "❌"}
for name in model_names:
_UpperCamelCase : Optional[int] = model_name_to_prefix[name]
_UpperCamelCase : Tuple = [
name,
check[slow_tokenizers[prefix]],
check[fast_tokenizers[prefix]],
check[pt_models[prefix]],
check[tf_models[prefix]],
check[flax_models[prefix]],
]
table += "|" + "|".join([_center_text(lowercase_ ,lowercase_ ) for l, w in zip(lowercase_ ,lowercase_ )] ) + "|\n"
return table
def lowercase__ ( lowercase_=False ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase : str = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS ,"index.md" ) ,start_prompt="<!--This table is updated automatically from the auto modules" ,end_prompt="<!-- End table-->" ,)
_UpperCamelCase : Any = get_model_table_from_auto_modules()
if current_table != new_table:
if overwrite:
            with open(os.path.join(PATH_TO_DOCS ,"index.md" ) ,"w" ,encoding="utf-8" ,newline="\n" ) as f:
f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
else:
raise ValueError(
"The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this." )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
lowerCamelCase__ = parser.parse_args()
check_model_table(args.fix_and_overwrite)
| 707
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Tuple = "yolos"
def __init__( self : Dict , __a : Optional[Any]=768 , __a : List[Any]=12 , __a : Any=12 , __a : List[Any]=3072 , __a : Optional[int]="gelu" , __a : Dict=0.0 , __a : Optional[Any]=0.0 , __a : Any=0.02 , __a : Optional[int]=1e-1_2 , __a : List[Any]=[512, 864] , __a : List[str]=16 , __a : str=3 , __a : Optional[Any]=True , __a : Optional[Any]=100 , __a : List[str]=True , __a : Any=False , __a : List[str]=1 , __a : str=5 , __a : Optional[Any]=2 , __a : Tuple=5 , __a : Any=2 , __a : Union[str, Any]=0.1 , **__a : List[str] , ) -> List[str]:
super().__init__(**__a )
_UpperCamelCase : Dict = hidden_size
_UpperCamelCase : Any = num_hidden_layers
_UpperCamelCase : str = num_attention_heads
_UpperCamelCase : Dict = intermediate_size
_UpperCamelCase : List[str] = hidden_act
_UpperCamelCase : List[str] = hidden_dropout_prob
_UpperCamelCase : str = attention_probs_dropout_prob
_UpperCamelCase : Tuple = initializer_range
_UpperCamelCase : List[str] = layer_norm_eps
_UpperCamelCase : Tuple = image_size
_UpperCamelCase : Tuple = patch_size
_UpperCamelCase : Dict = num_channels
_UpperCamelCase : Any = qkv_bias
_UpperCamelCase : str = num_detection_tokens
_UpperCamelCase : str = use_mid_position_embeddings
_UpperCamelCase : List[str] = auxiliary_loss
# Hungarian matcher
_UpperCamelCase : List[Any] = class_cost
_UpperCamelCase : int = bbox_cost
_UpperCamelCase : Optional[int] = giou_cost
# Loss coefficients
_UpperCamelCase : List[Any] = bbox_loss_coefficient
_UpperCamelCase : str = giou_loss_coefficient
_UpperCamelCase : Dict = eos_coefficient
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :List[str] = version.parse("1.11" )
@property
def __SCREAMING_SNAKE_CASE ( self : str ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> float:
return 1e-4
@property
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
return 12
| 51
| 0
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Optional[int] , __a : Optional[int] , __a : List[str]=13 , __a : Optional[Any]=3 , __a : Optional[Any]=224 , __a : str=30 , __a : Optional[Any]=400 , __a : List[Any]=True , __a : Any=None , __a : Union[str, Any]=True , __a : Dict=[0.5, 0.5, 0.5] , __a : str=[0.5, 0.5, 0.5] , ) -> Tuple:
_UpperCamelCase : Any = size if size is not None else {"height": 18, "width": 18}
_UpperCamelCase : Dict = parent
_UpperCamelCase : Optional[Any] = batch_size
_UpperCamelCase : List[Any] = num_channels
_UpperCamelCase : Union[str, Any] = image_size
_UpperCamelCase : Optional[int] = min_resolution
_UpperCamelCase : int = max_resolution
_UpperCamelCase : str = do_resize
_UpperCamelCase : Tuple = size
_UpperCamelCase : int = do_normalize
_UpperCamelCase : int = image_mean
_UpperCamelCase : Tuple = image_std
def __SCREAMING_SNAKE_CASE ( self : str ) -> str:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :str = ViTImageProcessor if is_vision_available() else None
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict:
_UpperCamelCase : str = EfficientFormerImageProcessorTester(self )
@property
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
return self.image_proc_tester.prepare_image_processor_dict()
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
_UpperCamelCase : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__a , "image_mean" ) )
self.assertTrue(hasattr(__a , "image_std" ) )
self.assertTrue(hasattr(__a , "do_normalize" ) )
self.assertTrue(hasattr(__a , "do_resize" ) )
self.assertTrue(hasattr(__a , "size" ) )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
pass
def __SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
# Initialize image_processor
_UpperCamelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCamelCase : List[str] = prepare_image_inputs(self.image_proc_tester , equal_resolution=__a )
for image in image_inputs:
self.assertIsInstance(__a , Image.Image )
# Test not batched input
_UpperCamelCase : Union[str, Any] = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
_UpperCamelCase : Any = image_processor(__a , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict:
# Initialize image_processor
_UpperCamelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCamelCase : Union[str, Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=__a , numpify=__a )
for image in image_inputs:
self.assertIsInstance(__a , np.ndarray )
# Test not batched input
_UpperCamelCase : Optional[Any] = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
_UpperCamelCase : Optional[Any] = image_processor(__a , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
# Initialize image_processor
_UpperCamelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCamelCase : Optional[int] = prepare_image_inputs(self.image_proc_tester , equal_resolution=__a , torchify=__a )
for image in image_inputs:
self.assertIsInstance(__a , torch.Tensor )
# Test not batched input
_UpperCamelCase : List[Any] = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
_UpperCamelCase : Tuple = image_processor(__a , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
| 708
|
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
lowerCamelCase__ = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
lowerCamelCase__ = [ord(letter) for letter in string.ascii_lowercase]
lowerCamelCase__ = {ord(char) for char in VALID_CHARS}
lowerCamelCase__ = ["the", "be", "to", "of", "and", "in", "that", "have"]
def lowercase__ ( lowercase_ ,lowercase_ ) -> str | None:
"""simple docstring"""
_UpperCamelCase : str = ""
_UpperCamelCase : int
_UpperCamelCase : int
_UpperCamelCase : int
for keychar, cipherchar in zip(cycle(lowercase_ ) ,lowercase_ ):
_UpperCamelCase : Dict = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
        decoded += chr(decodedchar )
return decoded
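# Sanity sketch (assumed values): XOR with a repeating key is self-inverse, so
# decoding with the key used for encryption recovers the plaintext, e.g.
#   cipher = [ord(c) ^ 97 for c in "the"]  # key "a" -> [21, 9, 4]
#   try_key((97,), cipher)                 # -> "the"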
def lowercase__ ( lowercase_ ) -> list[str]:
"""simple docstring"""
_UpperCamelCase : list[str] = []
    for key in product(LOWERCASE_INTS ,repeat=3 ):
        _UpperCamelCase : int = try_key(key ,lowercase_ )
if encoded is not None:
            possibles.append(encoded )
return possibles
def lowercase__ ( lowercase_ ,lowercase_ ) -> list[str]:
"""simple docstring"""
return [possible for possible in possibles if common_word in possible.lower()]
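# e.g. (illustrative) filter_common_word(["say the word", "no match"], "the")
# keeps only "say the word"; lower() makes the membership test case-insensitive.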
def lowercase__ ( lowercase_ = "p059_cipher.txt" ) -> int:
"""simple docstring"""
_UpperCamelCase : list[int]
_UpperCamelCase : list[str]
_UpperCamelCase : str
_UpperCamelCase : str
_UpperCamelCase : str = Path(lowercase_ ).parent.joinpath(lowercase_ ).read_text(encoding="utf-8" )
    _UpperCamelCase : Optional[Any] = [int(number ) for number in data.strip().split("," )]
_UpperCamelCase : List[str] = filter_valid_chars(lowercase_ )
for common_word in COMMON_WORDS:
        _UpperCamelCase : Union[str, Any] = filter_common_word(possibles ,common_word )
        if len(possibles ) == 1:
break
_UpperCamelCase : Union[str, Any] = possibles[0]
    return sum(ord(char ) for char in decoded_text )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 51
| 0
|
def lowercase__ ( lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase : List[Any] = len(lowercase_ )
for i in range(length - 1 ):
_UpperCamelCase : Optional[int] = i
        for k in range(i + 1 ,length ):
if collection[k] < collection[least]:
_UpperCamelCase : List[str] = k
if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
return collection
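# Worked trace (illustrative): selection_sort([3, 1, 2]) swaps 3 and 1 on the
# first pass giving [1, 3, 2], then swaps 3 and 2 giving the sorted [1, 2, 3].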
if __name__ == "__main__":
lowerCamelCase__ = input("Enter numbers separated by a comma:\n").strip()
lowerCamelCase__ = [int(item) for item in user_input.split(",")]
print(selection_sort(unsorted))
| 709
|
"""simple docstring"""
def lowercase__ ( lowercase_ ,lowercase_ ) -> None:
"""simple docstring"""
_UpperCamelCase : List[Any] = len(lowercase_ )
print("The following activities are selected:" )
# The first activity is always selected
_UpperCamelCase : List[Any] = 0
print(lowercase_ ,end="," )
# Consider rest of the activities
for j in range(lowercase_ ):
# If this activity has start time greater than
# or equal to the finish time of previously
# selected activity, then select it
if start[j] >= finish[i]:
print(lowercase_ ,end="," )
_UpperCamelCase : Optional[Any] = j
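# Illustrative trace for the sample inputs below: activities 0, 1, 3 and 4 are
# printed, since each of them starts at or after the previous selected finish time.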
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase__ = [1, 3, 0, 5, 8, 5]
lowerCamelCase__ = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
| 51
| 0
|
"""simple docstring"""
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : int , __a : str , __a : List[Any]=2 , __a : int=8 , __a : Dict=True , __a : Tuple=True , __a : List[str]=True , __a : Dict=True , __a : List[str]=99 , __a : Union[str, Any]=16 , __a : Any=5 , __a : Any=2 , __a : Any=36 , __a : Tuple="gelu" , __a : Dict=0.0 , __a : Optional[int]=0.0 , __a : str=512 , __a : Optional[int]=16 , __a : Dict=2 , __a : Dict=0.02 , __a : Any=3 , __a : List[str]=4 , __a : Optional[int]=None , ) -> Any:
_UpperCamelCase : Any = parent
_UpperCamelCase : Union[str, Any] = batch_size
_UpperCamelCase : Optional[int] = seq_length
_UpperCamelCase : Union[str, Any] = is_training
_UpperCamelCase : Optional[int] = use_input_mask
_UpperCamelCase : Dict = use_token_type_ids
_UpperCamelCase : int = use_labels
_UpperCamelCase : Any = vocab_size
_UpperCamelCase : List[str] = hidden_size
_UpperCamelCase : Optional[int] = num_hidden_layers
_UpperCamelCase : Any = num_attention_heads
_UpperCamelCase : Union[str, Any] = intermediate_size
_UpperCamelCase : Optional[Any] = hidden_act
_UpperCamelCase : int = hidden_dropout_prob
_UpperCamelCase : Dict = attention_probs_dropout_prob
_UpperCamelCase : Union[str, Any] = max_position_embeddings
_UpperCamelCase : Tuple = type_vocab_size
_UpperCamelCase : Dict = type_sequence_label_size
_UpperCamelCase : List[str] = initializer_range
_UpperCamelCase : List[str] = num_labels
_UpperCamelCase : Dict = num_choices
_UpperCamelCase : Any = scope
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict:
_UpperCamelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase : List[Any] = None
if self.use_input_mask:
_UpperCamelCase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCamelCase : List[str] = None
if self.use_token_type_ids:
_UpperCamelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCamelCase : List[Any] = None
_UpperCamelCase : List[str] = None
_UpperCamelCase : List[str] = None
if self.use_labels:
_UpperCamelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCamelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCamelCase : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
_UpperCamelCase : List[str] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__a , initializer_range=self.initializer_range , )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
_UpperCamelCase : List[Any] = self.get_config()
_UpperCamelCase : List[str] = 300
return config
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
        _UpperCamelCase : Union[str, Any] = self.prepare_config_and_inputs()
_UpperCamelCase : Optional[int] = True
_UpperCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_UpperCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : List[str] , __a : Tuple , __a : Tuple , __a : List[Any] , __a : Optional[int] , __a : List[str] , __a : Union[str, Any] ) -> Tuple:
_UpperCamelCase : Optional[Any] = MraModel(config=__a )
model.to(__a )
model.eval()
_UpperCamelCase : Tuple = model(__a , attention_mask=__a , token_type_ids=__a )
_UpperCamelCase : str = model(__a , token_type_ids=__a )
_UpperCamelCase : Tuple = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : Any , __a : Optional[Any] , __a : int , __a : List[str] , __a : str , __a : Tuple , __a : Optional[int] , __a : Optional[int] , __a : int , ) -> int:
_UpperCamelCase : List[Any] = True
_UpperCamelCase : str = MraModel(__a )
model.to(__a )
model.eval()
_UpperCamelCase : str = model(
__a , attention_mask=__a , token_type_ids=__a , encoder_hidden_states=__a , encoder_attention_mask=__a , )
_UpperCamelCase : List[str] = model(
__a , attention_mask=__a , token_type_ids=__a , encoder_hidden_states=__a , )
_UpperCamelCase : Dict = model(__a , attention_mask=__a , token_type_ids=__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : Union[str, Any] , __a : str , __a : Dict , __a : int , __a : List[str] , __a : int , __a : int ) -> int:
_UpperCamelCase : Tuple = MraForMaskedLM(config=__a )
model.to(__a )
model.eval()
_UpperCamelCase : Any = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : int , __a : int , __a : int , __a : Any , __a : List[str] , __a : List[str] , __a : Dict ) -> str:
_UpperCamelCase : Union[str, Any] = MraForQuestionAnswering(config=__a )
model.to(__a )
model.eval()
_UpperCamelCase : List[Any] = model(
__a , attention_mask=__a , token_type_ids=__a , start_positions=__a , end_positions=__a , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : Dict , __a : Any , __a : Any , __a : Optional[Any] , __a : List[Any] , __a : Optional[int] , __a : Any ) -> Optional[Any]:
_UpperCamelCase : List[Any] = self.num_labels
_UpperCamelCase : Optional[int] = MraForSequenceClassification(__a )
model.to(__a )
model.eval()
_UpperCamelCase : int = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : str , __a : Tuple , __a : int , __a : Any , __a : Optional[int] , __a : str , __a : str ) -> int:
_UpperCamelCase : Optional[int] = self.num_labels
_UpperCamelCase : Tuple = MraForTokenClassification(config=__a )
model.to(__a )
model.eval()
_UpperCamelCase : Any = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : Union[str, Any] , __a : Optional[Any] , __a : Any , __a : Optional[int] , __a : str , __a : Any , __a : Any ) -> str:
_UpperCamelCase : Tuple = self.num_choices
_UpperCamelCase : Optional[int] = MraForMultipleChoice(config=__a )
model.to(__a )
model.eval()
_UpperCamelCase : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCamelCase : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCamelCase : Tuple = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCamelCase : Optional[int] = model(
__a , attention_mask=__a , token_type_ids=__a , labels=__a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
_UpperCamelCase : Optional[Any] = self.prepare_config_and_inputs()
        _UpperCamelCase : int = config_and_inputs
_UpperCamelCase : str = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Any = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE__ :str = False
SCREAMING_SNAKE_CASE__ :int = False
SCREAMING_SNAKE_CASE__ :Optional[Any] = False
SCREAMING_SNAKE_CASE__ :Union[str, Any] = False
SCREAMING_SNAKE_CASE__ :List[Any] = ()
def __SCREAMING_SNAKE_CASE ( self : int ) -> Any:
_UpperCamelCase : Optional[int] = MraModelTester(self )
_UpperCamelCase : Any = ConfigTester(self , config_class=__a , hidden_size=37 )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
self.config_tester.run_common_tests()
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]:
_UpperCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]:
_UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_UpperCamelCase : Tuple = type
self.model_tester.create_and_check_model(*__a )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]:
_UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__a )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str:
_UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__a )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
_UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__a )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
_UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__a )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]:
_UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__a )
@slow
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]:
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase : Tuple = MraModel.from_pretrained(__a )
self.assertIsNotNone(__a )
@unittest.skip(reason="MRA does not output attentions" )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple:
return
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@slow
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Any:
_UpperCamelCase : Tuple = MraModel.from_pretrained("uw-madison/mra-base-512-4" )
_UpperCamelCase : Optional[Any] = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
_UpperCamelCase : Dict = model(__a )[0]
_UpperCamelCase : Optional[Any] = torch.Size((1, 256, 768) )
self.assertEqual(output.shape , __a )
_UpperCamelCase : Any = torch.tensor(
[[[-0.01_40, 0.08_30, -0.03_81], [0.15_46, 0.14_02, 0.02_20], [0.11_62, 0.08_51, 0.01_65]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1e-4 ) )
@slow
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
_UpperCamelCase : List[Any] = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4" )
_UpperCamelCase : str = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
_UpperCamelCase : Optional[int] = model(__a )[0]
_UpperCamelCase : int = 5_0265
_UpperCamelCase : str = torch.Size((1, 256, vocab_size) )
self.assertEqual(output.shape , __a )
_UpperCamelCase : List[str] = torch.tensor(
[[[9.25_95, -3.60_38, 11.88_19], [9.38_69, -3.26_93, 11.09_56], [11.85_24, -3.49_38, 13.12_10]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1e-4 ) )
@slow
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
_UpperCamelCase : Dict = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3" )
_UpperCamelCase : Any = torch.arange(4096 ).unsqueeze(0 )
with torch.no_grad():
_UpperCamelCase : Any = model(__a )[0]
_UpperCamelCase : Any = 5_0265
_UpperCamelCase : str = torch.Size((1, 4096, vocab_size) )
self.assertEqual(output.shape , __a )
_UpperCamelCase : List[str] = torch.tensor(
[[[5.47_89, -2.35_64, 7.50_64], [7.90_67, -1.33_69, 9.96_68], [9.07_12, -1.81_06, 7.03_80]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1e-4 ) )
| 710
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :torch.FloatTensor
class __SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , __a : Dict=3 , __a : Any=3 , __a : Union[str, Any]=("DownEncoderBlock2D",) , __a : Optional[int]=(64,) , __a : int=2 , __a : Tuple=32 , __a : int="silu" , __a : str=True , ) -> Dict:
super().__init__()
_UpperCamelCase : List[str] = layers_per_block
_UpperCamelCase : Dict = torch.nn.Convad(
__a , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
_UpperCamelCase : int = None
_UpperCamelCase : Any = nn.ModuleList([] )
# down
_UpperCamelCase : List[str] = block_out_channels[0]
for i, down_block_type in enumerate(__a ):
_UpperCamelCase : Tuple = output_channel
_UpperCamelCase : int = block_out_channels[i]
_UpperCamelCase : int = i == len(__a ) - 1
_UpperCamelCase : Dict = get_down_block(
__a , num_layers=self.layers_per_block , in_channels=__a , out_channels=__a , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=__a , resnet_groups=__a , attention_head_dim=__a , temb_channels=__a , )
self.down_blocks.append(__a )
# mid
_UpperCamelCase : Union[str, Any] = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__a , output_scale_factor=1 , resnet_time_scale_shift="default" , attention_head_dim=block_out_channels[-1] , resnet_groups=__a , temb_channels=__a , )
# out
_UpperCamelCase : Any = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=__a , eps=1e-6 )
_UpperCamelCase : Any = nn.SiLU()
_UpperCamelCase : Union[str, Any] = 2 * out_channels if double_z else out_channels
_UpperCamelCase : Tuple = nn.Convad(block_out_channels[-1] , __a , 3 , padding=1 )
_UpperCamelCase : Optional[int] = False
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : Dict ) -> List[str]:
_UpperCamelCase : int = x
_UpperCamelCase : Optional[int] = self.conv_in(__a )
if self.training and self.gradient_checkpointing:
def create_custom_forward(__a : Tuple ):
def custom_forward(*__a : Any ):
return module(*__a )
return custom_forward
# down
if is_torch_version(">=" , "1.11.0" ):
for down_block in self.down_blocks:
_UpperCamelCase : Optional[int] = torch.utils.checkpoint.checkpoint(
create_custom_forward(__a ) , __a , use_reentrant=__a )
# middle
_UpperCamelCase : Tuple = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __a , use_reentrant=__a )
else:
for down_block in self.down_blocks:
_UpperCamelCase : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(__a ) , __a )
# middle
_UpperCamelCase : int = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , __a )
else:
# down
for down_block in self.down_blocks:
_UpperCamelCase : int = down_block(__a )
# middle
_UpperCamelCase : int = self.mid_block(__a )
# post-process
_UpperCamelCase : Any = self.conv_norm_out(__a )
_UpperCamelCase : Any = self.conv_act(__a )
_UpperCamelCase : Optional[Any] = self.conv_out(__a )
return sample
class __SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , __a : int=3 , __a : Any=3 , __a : str=("UpDecoderBlock2D",) , __a : Optional[int]=(64,) , __a : int=2 , __a : Optional[int]=32 , __a : Tuple="silu" , __a : Union[str, Any]="group" , ) -> str:
super().__init__()
_UpperCamelCase : List[Any] = layers_per_block
_UpperCamelCase : Tuple = nn.Convad(
__a , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
_UpperCamelCase : List[str] = None
_UpperCamelCase : Dict = nn.ModuleList([] )
_UpperCamelCase : List[Any] = in_channels if norm_type == "spatial" else None
# mid
_UpperCamelCase : Optional[Any] = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__a , output_scale_factor=1 , resnet_time_scale_shift="default" if norm_type == "group" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=__a , temb_channels=__a , )
# up
_UpperCamelCase : List[str] = list(reversed(__a ) )
_UpperCamelCase : int = reversed_block_out_channels[0]
for i, up_block_type in enumerate(__a ):
_UpperCamelCase : int = output_channel
_UpperCamelCase : Union[str, Any] = reversed_block_out_channels[i]
_UpperCamelCase : Optional[Any] = i == len(__a ) - 1
_UpperCamelCase : Union[str, Any] = get_up_block(
__a , num_layers=self.layers_per_block + 1 , in_channels=__a , out_channels=__a , prev_output_channel=__a , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=__a , resnet_groups=__a , attention_head_dim=__a , temb_channels=__a , resnet_time_scale_shift=__a , )
self.up_blocks.append(__a )
_UpperCamelCase : Optional[Any] = output_channel
# out
if norm_type == "spatial":
_UpperCamelCase : Optional[int] = SpatialNorm(block_out_channels[0] , __a )
else:
_UpperCamelCase : Optional[int] = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=__a , eps=1e-6 )
_UpperCamelCase : str = nn.SiLU()
_UpperCamelCase : str = nn.Convad(block_out_channels[0] , __a , 3 , padding=1 )
_UpperCamelCase : Dict = False
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : List[Any] , __a : Union[str, Any]=None ) -> Tuple:
_UpperCamelCase : List[str] = z
_UpperCamelCase : Dict = self.conv_in(__a )
_UpperCamelCase : Any = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(__a : Any ):
def custom_forward(*__a : Tuple ):
return module(*__a )
return custom_forward
if is_torch_version(">=" , "1.11.0" ):
# middle
_UpperCamelCase : str = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __a , __a , use_reentrant=__a )
_UpperCamelCase : Optional[int] = sample.to(__a )
# up
for up_block in self.up_blocks:
_UpperCamelCase : List[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(__a ) , __a , __a , use_reentrant=__a )
else:
# middle
_UpperCamelCase : Optional[int] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __a , __a )
_UpperCamelCase : Union[str, Any] = sample.to(__a )
# up
for up_block in self.up_blocks:
_UpperCamelCase : str = torch.utils.checkpoint.checkpoint(create_custom_forward(__a ) , __a , __a )
else:
# middle
_UpperCamelCase : str = self.mid_block(__a , __a )
_UpperCamelCase : int = sample.to(__a )
# up
for up_block in self.up_blocks:
_UpperCamelCase : Any = up_block(__a , __a )
# post-process
if latent_embeds is None:
_UpperCamelCase : List[str] = self.conv_norm_out(__a )
else:
_UpperCamelCase : Optional[int] = self.conv_norm_out(__a , __a )
_UpperCamelCase : Tuple = self.conv_act(__a )
_UpperCamelCase : List[Any] = self.conv_out(__a )
return sample
class __SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , __a : Tuple , __a : List[str] , __a : List[str] , __a : str=None , __a : Optional[int]="random" , __a : Any=False , __a : Optional[Any]=True ) -> List[Any]:
super().__init__()
_UpperCamelCase : Tuple = n_e
_UpperCamelCase : Tuple = vq_embed_dim
_UpperCamelCase : Union[str, Any] = beta
_UpperCamelCase : str = legacy
_UpperCamelCase : Dict = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
_UpperCamelCase : Any = remap
if self.remap is not None:
self.register_buffer("used" , torch.tensor(np.load(self.remap ) ) )
_UpperCamelCase : Dict = self.used.shape[0]
_UpperCamelCase : Optional[int] = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
_UpperCamelCase : Optional[int] = self.re_embed
_UpperCamelCase : Any = self.re_embed + 1
print(
F'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
F'''Using {self.unknown_index} for unknown indices.''' )
else:
_UpperCamelCase : Union[str, Any] = n_e
_UpperCamelCase : List[str] = sane_index_shape
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : Optional[Any] ) -> Optional[int]:
_UpperCamelCase : str = inds.shape
assert len(__a ) > 1
_UpperCamelCase : Union[str, Any] = inds.reshape(ishape[0] , -1 )
_UpperCamelCase : Optional[Any] = self.used.to(__a )
_UpperCamelCase : List[str] = (inds[:, :, None] == used[None, None, ...]).long()
_UpperCamelCase : Optional[Any] = match.argmax(-1 )
_UpperCamelCase : Any = match.sum(2 ) < 1
if self.unknown_index == "random":
_UpperCamelCase : Optional[int] = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
_UpperCamelCase : Dict = self.unknown_index
return new.reshape(__a )
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : Optional[int] ) -> Optional[int]:
_UpperCamelCase : int = inds.shape
assert len(__a ) > 1
_UpperCamelCase : List[Any] = inds.reshape(ishape[0] , -1 )
_UpperCamelCase : Optional[int] = self.used.to(__a )
if self.re_embed > self.used.shape[0]: # extra token
_UpperCamelCase : int = 0 # simply set to zero
_UpperCamelCase : Union[str, Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , __a )
return back.reshape(__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : str ) -> Optional[int]:
# reshape z -> (batch, height, width, channel) and flatten
_UpperCamelCase : List[str] = z.permute(0 , 2 , 3 , 1 ).contiguous()
_UpperCamelCase : int = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
_UpperCamelCase : Optional[int] = torch.argmin(torch.cdist(__a , self.embedding.weight ) , dim=1 )
_UpperCamelCase : int = self.embedding(__a ).view(z.shape )
_UpperCamelCase : str = None
_UpperCamelCase : Any = None
# compute loss for embedding
if not self.legacy:
_UpperCamelCase : List[str] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
_UpperCamelCase : str = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
_UpperCamelCase : List[str] = z + (z_q - z).detach()
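        # (straight-through estimator: the forward value is the quantized z_q, but the
        # detached difference carries no gradient, so backprop treats z_q as if it were z)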
# reshape back to match original input shape
_UpperCamelCase : Optional[Any] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
_UpperCamelCase : Tuple = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
_UpperCamelCase : Dict = self.remap_to_used(__a )
_UpperCamelCase : List[str] = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
_UpperCamelCase : str = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : List[str] , __a : str ) -> Any:
# shape specifying (batch, height, width, channel)
if self.remap is not None:
_UpperCamelCase : str = indices.reshape(shape[0] , -1 ) # add batch axis
_UpperCamelCase : str = self.unmap_to_all(__a )
_UpperCamelCase : int = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
_UpperCamelCase : Optional[int] = self.embedding(__a )
if shape is not None:
_UpperCamelCase : Tuple = z_q.view(__a )
# reshape back to match original input shape
_UpperCamelCase : Tuple = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
def __init__( self : Optional[int] , __a : List[str] , __a : Optional[Any]=False ) -> int:
_UpperCamelCase : Dict = parameters
        _UpperCamelCase, _UpperCamelCase = torch.chunk(__a , 2 , dim=1 )
_UpperCamelCase : Tuple = torch.clamp(self.logvar , -30.0 , 20.0 )
_UpperCamelCase : Union[str, Any] = deterministic
_UpperCamelCase : Dict = torch.exp(0.5 * self.logvar )
_UpperCamelCase : Any = torch.exp(self.logvar )
if self.deterministic:
_UpperCamelCase : List[Any] = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : Optional[torch.Generator] = None ) -> torch.FloatTensor:
# make sure sample is on the same device as the parameters and has same dtype
_UpperCamelCase : List[Any] = randn_tensor(
self.mean.shape , generator=__a , device=self.parameters.device , dtype=self.parameters.dtype )
_UpperCamelCase : List[Any] = self.mean + self.std * sample
return x
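    # (reparameterization trick: x = mean + std * eps with eps ~ N(0, I), so the
    # sample stays stochastic while gradients can flow through mean and std)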
def __SCREAMING_SNAKE_CASE ( self : Any , __a : List[str]=None ) -> List[Any]:
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
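    # (closed-form KL between diagonal Gaussians; against the standard normal it
    # reduces to 0.5 * sum(mean^2 + var - 1 - logvar) per example)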
def __SCREAMING_SNAKE_CASE ( self : str , __a : Tuple , __a : List[str]=[1, 2, 3] ) -> int:
if self.deterministic:
return torch.Tensor([0.0] )
_UpperCamelCase : List[str] = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
return self.mean
| 51
| 0
|
"""simple docstring"""
from __future__ import annotations
def lowercase__ ( lowercase_ ) -> bool:
"""simple docstring"""
return len(set(lowercase_ ) ) == len(lowercase_ )
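# e.g. (illustrative): [1, 2, 3] -> True (all elements unique), [1, 2, 2] -> False,
# since converting to a set drops repeated elements.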
if __name__ == "__main__":
import doctest
doctest.testmod()
| 711
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=_UpperCamelCase )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :str = field(default="summarization" , metadata={"include_in_asdict_even_if_is_default": True} )
SCREAMING_SNAKE_CASE__ :ClassVar[Features] = Features({"text": Value("string" )} )
SCREAMING_SNAKE_CASE__ :ClassVar[Features] = Features({"summary": Value("string" )} )
SCREAMING_SNAKE_CASE__ :str = "text"
SCREAMING_SNAKE_CASE__ :str = "summary"
@property
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict[str, str]:
return {self.text_column: "text", self.summary_column: "summary"}
| 51
| 0
|
"""simple docstring"""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def lowercase__ ( lowercase_ ,lowercase_=7 ) -> Tuple:
"""simple docstring"""
_UpperCamelCase : Optional[int] = None
if token is not None:
_UpperCamelCase : Optional[Any] = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''}
# The id of a workflow (not of a workflow run)
_UpperCamelCase : Any = "636036"
_UpperCamelCase : Tuple = F'''https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs'''
# On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
url += F'''?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}'''
_UpperCamelCase : Dict = requests.get(lowercase_ ,headers=lowercase_ ).json()
return result["workflow_runs"]
def lowercase__ ( lowercase_ ) -> List[str]:
"""simple docstring"""
_UpperCamelCase : List[Any] = get_daily_ci_runs(lowercase_ )
_UpperCamelCase : Tuple = None
for workflow_run in workflow_runs:
if workflow_run["status"] == "completed":
_UpperCamelCase : Union[str, Any] = workflow_run["id"]
break
return workflow_run_id
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase : str = get_last_daily_ci_runs(lowercase_ )
if workflow_run_id is not None:
_UpperCamelCase : int = get_artifacts_links(worflow_run_id=lowercase_ ,token=lowercase_ )
for artifact_name in artifact_names:
if artifact_name in artifacts_links:
_UpperCamelCase : Dict = artifacts_links[artifact_name]
download_artifact(
artifact_name=lowercase_ ,artifact_url=lowercase_ ,output_dir=lowercase_ ,token=lowercase_ )
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> int:
"""simple docstring"""
get_last_daily_ci_artifacts(lowercase_ ,lowercase_ ,lowercase_ )
_UpperCamelCase : Dict = {}
for artifact_name in artifact_names:
_UpperCamelCase : Union[str, Any] = os.path.join(lowercase_ ,F'''{artifact_name}.zip''' )
if os.path.isfile(lowercase_ ):
_UpperCamelCase : int = {}
with zipfile.ZipFile(lowercase_ ) as z:
for filename in z.namelist():
if not os.path.isdir(lowercase_ ):
# read the file
with z.open(lowercase_ ) as f:
_UpperCamelCase : int = f.read().decode("UTF-8" )
return results
| 712
|
"""simple docstring"""
def lowercase__ ( lowercase_ ) -> set:
"""simple docstring"""
_UpperCamelCase : Union[str, Any] = set()
# edges = list of graph's edges
_UpperCamelCase : Union[str, Any] = get_edges(lowercase_ )
# While there are still elements in edges list, take an arbitrary edge
# (from_node, to_node) and add his extremity to chosen_vertices and then
# remove all arcs adjacent to the from_node and to_node
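# (this is the classic maximal-matching 2-approximation of minimum vertex cover:
#  both endpoints of every matched edge are added to the cover)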
while edges:
_UpperCamelCase, _UpperCamelCase : str = edges.pop()
chosen_vertices.add(lowercase_ )
chosen_vertices.add(lowercase_ )
for edge in edges.copy():
if from_node in edge or to_node in edge:
edges.discard(lowercase_ )
return chosen_vertices
def lowercase__ ( lowercase_ ) -> set:
"""simple docstring"""
_UpperCamelCase : List[str] = set()
for from_node, to_nodes in graph.items():
for to_node in to_nodes:
edges.add((from_node, to_node) )
return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 51
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
lowerCamelCase__ = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 1_0563, 1_0786,
1_1420, 1_1709, 1_1907, 1_3163, 1_3697, 1_3700, 1_4808, 1_5306, 1_6410, 1_6791,
1_7992, 1_9203, 1_9510, 2_0724, 2_2305, 2_2935, 2_7007, 3_0109, 3_0420, 3_3409,
3_4949, 4_0283, 4_0493, 4_0549, 4_7282, 4_9146, 5_0257, 5_0359, 5_0360, 5_0361
]
lowerCamelCase__ = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 1_0428, 1_0929, 1_1938, 1_2033, 1_2331, 1_2562, 1_3793,
1_4157, 1_4635, 1_5265, 1_5618, 1_6553, 1_6604, 1_8362, 1_8956, 2_0075, 2_1675,
2_2520, 2_6130, 2_6161, 2_6435, 2_8279, 2_9464, 3_1650, 3_2302, 3_2470, 3_6865,
4_2863, 4_7425, 4_9870, 5_0254, 5_0258, 5_0360, 5_0361, 5_0362
]
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :List[Any] = "whisper"
SCREAMING_SNAKE_CASE__ :Dict = ["past_key_values"]
SCREAMING_SNAKE_CASE__ :List[Any] = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : Optional[Any] , __a : Dict=5_1865 , __a : Tuple=80 , __a : Optional[Any]=6 , __a : List[Any]=4 , __a : Any=6 , __a : Tuple=4 , __a : List[str]=1536 , __a : str=1536 , __a : Any=0.0 , __a : List[str]=0.0 , __a : Dict=5_0257 , __a : str=True , __a : Optional[Any]=True , __a : Optional[Any]="gelu" , __a : Optional[int]=256 , __a : List[Any]=0.0 , __a : Dict=0.0 , __a : List[Any]=0.0 , __a : int=0.02 , __a : Optional[Any]=False , __a : Optional[Any]=1500 , __a : List[str]=448 , __a : Union[str, Any]=5_0256 , __a : List[str]=5_0256 , __a : Any=5_0256 , __a : str=None , __a : List[Any]=[220, 5_0256] , __a : int=False , __a : str=256 , __a : Any=False , __a : Optional[Any]=0.05 , __a : Union[str, Any]=10 , __a : int=2 , __a : int=0.0 , __a : Optional[int]=10 , __a : str=0 , __a : Any=7 , **__a : Tuple , ) -> Tuple:
_UpperCamelCase : Tuple = vocab_size
_UpperCamelCase : List[Any] = num_mel_bins
_UpperCamelCase : Union[str, Any] = d_model
_UpperCamelCase : Tuple = encoder_layers
_UpperCamelCase : Any = encoder_attention_heads
_UpperCamelCase : List[Any] = decoder_layers
_UpperCamelCase : int = decoder_attention_heads
_UpperCamelCase : Dict = decoder_ffn_dim
_UpperCamelCase : Tuple = encoder_ffn_dim
_UpperCamelCase : Tuple = dropout
_UpperCamelCase : Union[str, Any] = attention_dropout
_UpperCamelCase : int = activation_dropout
_UpperCamelCase : int = activation_function
_UpperCamelCase : str = init_std
_UpperCamelCase : List[Any] = encoder_layerdrop
_UpperCamelCase : Tuple = decoder_layerdrop
_UpperCamelCase : List[Any] = use_cache
_UpperCamelCase : List[str] = encoder_layers
_UpperCamelCase : Union[str, Any] = scale_embedding # scale factor will be sqrt(d_model) if True
_UpperCamelCase : Dict = max_source_positions
_UpperCamelCase : Dict = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
_UpperCamelCase : Union[str, Any] = classifier_proj_size
_UpperCamelCase : List[str] = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_UpperCamelCase : Tuple = apply_spec_augment
_UpperCamelCase : List[Any] = mask_time_prob
_UpperCamelCase : List[Any] = mask_time_length
_UpperCamelCase : Tuple = mask_time_min_masks
_UpperCamelCase : List[Any] = mask_feature_prob
_UpperCamelCase : str = mask_feature_length
_UpperCamelCase : int = mask_feature_min_masks
_UpperCamelCase : Union[str, Any] = median_filter_width
super().__init__(
pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , is_encoder_decoder=__a , decoder_start_token_id=__a , suppress_tokens=__a , begin_suppress_tokens=__a , **__a , )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
_UpperCamelCase : int = OrderedDict(
[
("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
] )
if self.use_past:
_UpperCamelCase : Union[str, Any] = {0: "batch"}
else:
_UpperCamelCase : Optional[int] = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(__a , direction="inputs" )
return common_inputs
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , __a : int = -1 , __a : int = -1 , __a : bool = False , __a : Optional["TensorType"] = None , __a : int = 2_2050 , __a : float = 5.0 , __a : int = 220 , ) -> Mapping[str, Any]:
_UpperCamelCase : str = OrderedDict()
_UpperCamelCase : str = OnnxConfig.generate_dummy_inputs(
self , preprocessor=preprocessor.feature_extractor , batch_size=__a , framework=__a , sampling_rate=__a , time_duration=__a , frequency=__a , )
_UpperCamelCase : List[str] = encoder_inputs["input_features"].shape[2]
_UpperCamelCase : Union[str, Any] = encoder_sequence_length // 2 if self.use_past else seq_length
_UpperCamelCase : Dict = super().generate_dummy_inputs(
preprocessor.tokenizer , __a , __a , __a , __a )
_UpperCamelCase : int = encoder_inputs.pop("input_features" )
_UpperCamelCase : List[Any] = decoder_inputs.pop("decoder_input_ids" )
if "past_key_values" in decoder_inputs:
_UpperCamelCase : Optional[int] = decoder_inputs.pop("past_key_values" )
return dummy_inputs
@property
def __SCREAMING_SNAKE_CASE ( self : str ) -> float:
return 1e-3
| 713
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase__ = {
"configuration_owlvit": [
"OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"OwlViTConfig",
"OwlViTOnnxConfig",
"OwlViTTextConfig",
"OwlViTVisionConfig",
],
"processing_owlvit": ["OwlViTProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["OwlViTFeatureExtractor"]
lowerCamelCase__ = ["OwlViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"OwlViTModel",
"OwlViTPreTrainedModel",
"OwlViTTextModel",
"OwlViTVisionModel",
"OwlViTForObjectDetection",
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 51
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Union[str, Any] = "speech_to_text"
SCREAMING_SNAKE_CASE__ :Any = ["past_key_values"]
SCREAMING_SNAKE_CASE__ :int = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : Any , __a : Optional[int]=1_0000 , __a : Optional[Any]=12 , __a : Optional[int]=2048 , __a : Optional[int]=4 , __a : List[Any]=6 , __a : Optional[Any]=2048 , __a : List[str]=4 , __a : Dict=0.0 , __a : Union[str, Any]=0.0 , __a : Union[str, Any]=True , __a : int=True , __a : Union[str, Any]="relu" , __a : Union[str, Any]=256 , __a : List[str]=0.1 , __a : Any=0.0 , __a : Any=0.0 , __a : List[str]=0.02 , __a : List[str]=2 , __a : Optional[int]=True , __a : int=1 , __a : Optional[int]=0 , __a : Dict=2 , __a : Tuple=6000 , __a : Optional[Any]=1024 , __a : Union[str, Any]=2 , __a : str=(5, 5) , __a : Dict=1024 , __a : List[Any]=80 , __a : Dict=1 , **__a : Optional[int] , ) -> Optional[int]:
_UpperCamelCase : List[str] = vocab_size
_UpperCamelCase : Any = d_model
_UpperCamelCase : Tuple = encoder_ffn_dim
_UpperCamelCase : List[str] = encoder_layers
_UpperCamelCase : str = encoder_attention_heads
_UpperCamelCase : List[str] = decoder_ffn_dim
_UpperCamelCase : Any = decoder_layers
_UpperCamelCase : List[str] = decoder_attention_heads
_UpperCamelCase : Union[str, Any] = dropout
_UpperCamelCase : List[str] = attention_dropout
_UpperCamelCase : Optional[int] = activation_dropout
_UpperCamelCase : int = activation_function
_UpperCamelCase : Dict = init_std
_UpperCamelCase : List[Any] = encoder_layerdrop
_UpperCamelCase : Optional[int] = decoder_layerdrop
_UpperCamelCase : List[str] = use_cache
_UpperCamelCase : str = encoder_layers
_UpperCamelCase : Dict = scale_embedding # scale factor will be sqrt(d_model) if True
_UpperCamelCase : Any = max_source_positions
_UpperCamelCase : Optional[Any] = max_target_positions
_UpperCamelCase : Optional[Any] = num_conv_layers
_UpperCamelCase : Tuple = list(__a )
_UpperCamelCase : Optional[int] = conv_channels
_UpperCamelCase : List[Any] = input_feat_per_channel
_UpperCamelCase : List[str] = input_channels
if len(self.conv_kernel_sizes ) != self.num_conv_layers:
raise ValueError(
"Configuration for convolutional module is incorrect. "
"It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` "
F'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, '''
F'''`config.num_conv_layers = {self.num_conv_layers}`.''' )
super().__init__(
pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , is_encoder_decoder=__a , decoder_start_token_id=__a , **__a , )
| 714
|
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def lowercase__ ( lowercase_ = 1_000_000 ,lowercase_ = 10 ) -> int:
"""simple docstring"""
_UpperCamelCase : defaultdict = defaultdict(lowercase_ )
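# A square lamina with outer side w and hole side h (same parity, h >= 1) uses
# w*w - h*h tiles; tally, for every tile total, how many distinct laminae produce it.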
for outer_width in range(3 ,(t_limit // 4) + 2 ):
if outer_width * outer_width > t_limit:
_UpperCamelCase : Any = max(
ceil(sqrt(outer_width * outer_width - t_limit ) ) ,1 )
else:
_UpperCamelCase : str = 1
hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
for hole_width in range(lowercase_ ,outer_width - 1 ,2 ):
count[outer_width * outer_width - hole_width * hole_width] += 1
return sum(1 for n in count.values() if 1 <= n <= 10 )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 51
| 0
|
"""simple docstring"""
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
_UpperCamelCase : Optional[Any] = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertTrue(is_safetensors_compatible(__a ) )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> str:
_UpperCamelCase : Any = [
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertTrue(is_safetensors_compatible(__a ) )
def __SCREAMING_SNAKE_CASE ( self : str ) -> int:
_UpperCamelCase : int = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
"unet/diffusion_pytorch_model.bin",
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(__a ) )
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
_UpperCamelCase : Any = [
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
]
self.assertTrue(is_safetensors_compatible(__a ) )
def __SCREAMING_SNAKE_CASE ( self : int ) -> int:
_UpperCamelCase : Any = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
# Removed: 'text_encoder/model.safetensors',
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertFalse(is_safetensors_compatible(__a ) )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]:
_UpperCamelCase : Any = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
_UpperCamelCase : Optional[int] = "fp16"
self.assertTrue(is_safetensors_compatible(__a , variant=__a ) )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Any:
_UpperCamelCase : Optional[Any] = [
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
_UpperCamelCase : List[Any] = "fp16"
self.assertTrue(is_safetensors_compatible(__a , variant=__a ) )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
# pass variant but use the non-variant filenames
_UpperCamelCase : int = [
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
_UpperCamelCase : int = "fp16"
self.assertTrue(is_safetensors_compatible(__a , variant=__a ) )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
_UpperCamelCase : Optional[Any] = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
"unet/diffusion_pytorch_model.fp16.bin",
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
_UpperCamelCase : Tuple = "fp16"
self.assertFalse(is_safetensors_compatible(__a , variant=__a ) )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
_UpperCamelCase : int = [
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
]
_UpperCamelCase : int = "fp16"
self.assertTrue(is_safetensors_compatible(__a , variant=__a ) )
def __SCREAMING_SNAKE_CASE ( self : int ) -> int:
# pass variant but use the non-variant filenames
_UpperCamelCase : Optional[int] = [
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
]
_UpperCamelCase : Tuple = "fp16"
self.assertTrue(is_safetensors_compatible(__a , variant=__a ) )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
_UpperCamelCase : str = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
# Removed: 'text_encoder/model.fp16.safetensors',
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
_UpperCamelCase : Optional[int] = "fp16"
self.assertFalse(is_safetensors_compatible(__a , variant=__a ) )
| 715
|
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
lowerCamelCase__ = TypeVar("KEY")
lowerCamelCase__ = TypeVar("VAL")
@dataclass(frozen=_UpperCamelCase , slots=_UpperCamelCase )
class __SCREAMING_SNAKE_CASE ( Generic[KEY, VAL] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :KEY
SCREAMING_SNAKE_CASE__ :VAL
class __SCREAMING_SNAKE_CASE ( _Item ):
'''simple docstring'''
def __init__( self : List[str] ) -> None:
super().__init__(__a , __a )
def __bool__( self : Dict ) -> bool:
return False
lowerCamelCase__ = _DeletedItem()
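# Open addressing: a deleted slot keeps this falsy tombstone instead of None,
# so probe chains that pass through it are not cut short on lookup.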
class __SCREAMING_SNAKE_CASE ( MutableMapping[KEY, VAL] ):
'''simple docstring'''
def __init__( self : int , __a : int = 8 , __a : float = 0.75 ) -> None:
_UpperCamelCase : str = initial_block_size
_UpperCamelCase : list[_Item | None] = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
_UpperCamelCase : List[str] = capacity_factor
_UpperCamelCase : Dict = 0
def __SCREAMING_SNAKE_CASE ( self : int , __a : KEY ) -> int:
return hash(__a ) % len(self._buckets )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : int ) -> int:
return (ind + 1) % len(self._buckets )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : int , __a : KEY , __a : VAL ) -> bool:
_UpperCamelCase : List[Any] = self._buckets[ind]
if not stored:
_UpperCamelCase : Tuple = _Item(__a , __a )
self._len += 1
return True
elif stored.key == key:
_UpperCamelCase : Union[str, Any] = _Item(__a , __a )
return True
else:
return False
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> bool:
_UpperCamelCase : Any = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(__a )
def __SCREAMING_SNAKE_CASE ( self : str ) -> bool:
if len(self._buckets ) <= self._initial_block_size:
return False
_UpperCamelCase : List[str] = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : int ) -> None:
_UpperCamelCase : Any = self._buckets
_UpperCamelCase : List[Any] = [None] * new_size
_UpperCamelCase : List[str] = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def __SCREAMING_SNAKE_CASE ( self : int ) -> None:
self._resize(len(self._buckets ) * 2 )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> None:
self._resize(len(self._buckets ) // 2 )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : KEY ) -> Iterator[int]:
_UpperCamelCase : str = self._get_bucket_index(__a )
for _ in range(len(self._buckets ) ):
yield ind
_UpperCamelCase : Tuple = self._get_next_ind(__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : KEY , __a : VAL ) -> None:
for ind in self._iterate_buckets(__a ):
if self._try_set(__a , __a , __a ):
break
def __setitem__( self : int , __a : KEY , __a : VAL ) -> None:
if self._is_full():
self._size_up()
self._add_item(__a , __a )
def __delitem__( self : str , __a : KEY ) -> None:
for ind in self._iterate_buckets(__a ):
_UpperCamelCase : Tuple = self._buckets[ind]
if item is None:
raise KeyError(__a )
if item is _deleted:
continue
if item.key == key:
_UpperCamelCase : List[Any] = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self : str , __a : KEY ) -> VAL:
for ind in self._iterate_buckets(__a ):
_UpperCamelCase : Tuple = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(__a )
def __len__( self : List[Any] ) -> int:
return self._len
def __iter__( self : List[str] ) -> Iterator[KEY]:
yield from (item.key for item in self._buckets if item)
def __repr__( self : List[str] ) -> str:
_UpperCamelCase : Optional[int] = " ,".join(
F'''{item.key}: {item.val}''' for item in self._buckets if item )
return F'''HashMap({val_string})'''
| 51
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCamelCase__ = {
"configuration_mask2former": [
"MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Mask2FormerConfig",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["Mask2FormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"Mask2FormerForUniversalSegmentation",
"Mask2FormerModel",
"Mask2FormerPreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_mask2former import Mask2FormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 716
|
"""simple docstring"""
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Optional[Any] , __a : list[int] ) -> None:
_UpperCamelCase : Tuple = len(__a )
_UpperCamelCase : Dict = [0] * len_array
if len_array > 0:
_UpperCamelCase : Optional[Any] = array[0]
for i in range(1 , __a ):
_UpperCamelCase : Tuple = self.prefix_sum[i - 1] + array[i]
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : int , __a : int ) -> int:
if start == 0:
return self.prefix_sum[end]
return self.prefix_sum[end] - self.prefix_sum[start - 1]
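# e.g. (illustrative): for array [1, 2, 3] the prefix sums are [1, 3, 6],
# so the range sum over indices 1..2 is 6 - 1 == 5 in O(1).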
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : int ) -> bool:
_UpperCamelCase : int = {0}
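# A contiguous subarray sums to target_sum iff two prefix sums differ by exactly
# target_sum; seeding the set with 0 covers subarrays that start at index 0.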
for sum_item in self.prefix_sum:
if sum_item - target_sum in sums:
return True
sums.add(__a )
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 51
| 0
|
"""simple docstring"""
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : List[str] , __a : str , __a : List[Any] ) -> Optional[int]:
_UpperCamelCase : Union[str, Any] = name
_UpperCamelCase : Tuple = val
def __str__( self : Dict ) -> Any:
return F'''{self.__class__.__name__}({self.name}, {self.val})'''
def __lt__( self : List[str] , __a : Any ) -> List[Any]:
return self.val < other.val
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : List[Any] , __a : Optional[int] ) -> Tuple:
_UpperCamelCase : List[Any] = {}
_UpperCamelCase : Union[str, Any] = {}
_UpperCamelCase : Union[str, Any] = self.build_heap(__a )
def __getitem__( self : Any , __a : List[str] ) -> Optional[int]:
return self.get_value(__a )
def __SCREAMING_SNAKE_CASE ( self : int , __a : List[str] ) -> List[Any]:
return (idx - 1) // 2
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : List[str] ) -> str:
return idx * 2 + 1
def __SCREAMING_SNAKE_CASE ( self : str , __a : Dict ) -> int:
return idx * 2 + 2
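# Array-backed binary heap: parent(i) == (i - 1) // 2, children at 2*i + 1 and 2*i + 2.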
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : Any ) -> Optional[int]:
return self.heap_dict[key]
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : List[Any] ) -> List[Any]:
_UpperCamelCase : Optional[int] = len(__a ) - 1
_UpperCamelCase : Any = self.get_parent_idx(__a )
for idx, i in enumerate(__a ):
_UpperCamelCase : Tuple = idx
_UpperCamelCase : Union[str, Any] = i.val
for i in range(__a , -1 , -1 ):
self.sift_down(__a , __a )
return array
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : List[Any] , __a : Tuple ) -> List[Any]:
while True:
_UpperCamelCase : Dict = self.get_left_child_idx(__a ) # noqa: E741
_UpperCamelCase : Any = self.get_right_child_idx(__a )
_UpperCamelCase : Tuple = idx
if l < len(__a ) and array[l] < array[idx]:
_UpperCamelCase : Tuple = l
if r < len(__a ) and array[r] < array[smallest]:
_UpperCamelCase : str = r
if smallest != idx:
_UpperCamelCase : Optional[int] = array[smallest], array[idx]
_UpperCamelCase, _UpperCamelCase : Union[str, Any] = (
self.idx_of_element[array[smallest]],
self.idx_of_element[array[idx]],
)
_UpperCamelCase : int = smallest
else:
break
def __SCREAMING_SNAKE_CASE ( self : str , __a : Any ) -> Tuple:
_UpperCamelCase : List[str] = self.get_parent_idx(__a )
while p >= 0 and self.heap[p] > self.heap[idx]:
_UpperCamelCase : Optional[Any] = self.heap[idx], self.heap[p]
_UpperCamelCase : Dict = (
self.idx_of_element[self.heap[idx]],
self.idx_of_element[self.heap[p]],
)
_UpperCamelCase : List[str] = p
_UpperCamelCase : Optional[Any] = self.get_parent_idx(__a )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
return self.heap[0]
def __SCREAMING_SNAKE_CASE ( self : str ) -> str:
_UpperCamelCase : str = self.heap[-1], self.heap[0]
_UpperCamelCase : Tuple = (
self.idx_of_element[self.heap[-1]],
self.idx_of_element[self.heap[0]],
)
_UpperCamelCase : Dict = self.heap.pop()
del self.idx_of_element[x]
self.sift_down(0 , self.heap )
return x
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : int ) -> str:
self.heap.append(__a )
_UpperCamelCase : int = len(self.heap ) - 1
_UpperCamelCase : Optional[Any] = node.val
self.sift_up(len(self.heap ) - 1 )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
return len(self.heap ) == 0
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : Optional[int] , __a : int ) -> Optional[Any]:
assert (
self.heap[self.idx_of_element[node]].val > new_value
), "newValue must be less that current value"
_UpperCamelCase : Union[str, Any] = new_value
_UpperCamelCase : Optional[Any] = new_value
self.sift_up(self.idx_of_element[node] )
lowerCamelCase__ = Node("R", -1)
lowerCamelCase__ = Node("B", 6)
lowerCamelCase__ = Node("A", 3)
lowerCamelCase__ = Node("X", 1)
lowerCamelCase__ = Node("E", 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
lowerCamelCase__ = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
print(i)
print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 717
|
"""simple docstring"""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def lowercase__ ( lowercase_ ,lowercase_=7 ) -> Tuple:
"""simple docstring"""
_UpperCamelCase : Optional[int] = None
if token is not None:
_UpperCamelCase : Optional[Any] = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''}
# The id of a workflow (not of a workflow run)
_UpperCamelCase : Any = "636036"
_UpperCamelCase : Tuple = F'''https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs'''
# On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
url += F'''?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}'''
_UpperCamelCase : Dict = requests.get(lowercase_ ,headers=lowercase_ ).json()
return result["workflow_runs"]
def lowercase__ ( lowercase_ ) -> List[str]:
"""simple docstring"""
_UpperCamelCase : List[Any] = get_daily_ci_runs(lowercase_ )
_UpperCamelCase : Tuple = None
for workflow_run in workflow_runs:
if workflow_run["status"] == "completed":
_UpperCamelCase : Union[str, Any] = workflow_run["id"]
break
return workflow_run_id
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase : str = get_last_daily_ci_runs(lowercase_ )
if workflow_run_id is not None:
_UpperCamelCase : int = get_artifacts_links(worflow_run_id=lowercase_ ,token=lowercase_ )
for artifact_name in artifact_names:
if artifact_name in artifacts_links:
_UpperCamelCase : Dict = artifacts_links[artifact_name]
download_artifact(
artifact_name=lowercase_ ,artifact_url=lowercase_ ,output_dir=lowercase_ ,token=lowercase_ )
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> int:
"""simple docstring"""
get_last_daily_ci_artifacts(lowercase_ ,lowercase_ ,lowercase_ )
_UpperCamelCase : Dict = {}
for artifact_name in artifact_names:
_UpperCamelCase : Union[str, Any] = os.path.join(lowercase_ ,F'''{artifact_name}.zip''' )
if os.path.isfile(lowercase_ ):
_UpperCamelCase : int = {}
with zipfile.ZipFile(lowercase_ ) as z:
for filename in z.namelist():
if not os.path.isdir(lowercase_ ):
# read the file
with z.open(lowercase_ ) as f:
_UpperCamelCase : int = f.read().decode("UTF-8" )
return results
| 51
| 0
|
"""simple docstring"""
from __future__ import annotations
def lowercase__ ( lowercase_ ) -> list[int]:
"""simple docstring"""
_UpperCamelCase : Union[str, Any] = 2
_UpperCamelCase : List[str] = []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(lowercase_ )
if n > 1:
factors.append(lowercase_ )
return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
| 718
|
"""simple docstring"""
import math
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Any , __a : list[list[float]] , __a : list[int] ) -> int:
_UpperCamelCase : List[Any] = 0.0
_UpperCamelCase : Union[str, Any] = 0.0
for i in range(len(__a ) ):
da += math.pow((sample[i] - weights[0][i]) , 2 )
db += math.pow((sample[i] - weights[1][i]) , 2 )
return 0 if da > db else 1
return 0
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : list[list[int | float]] , __a : list[int] , __a : int , __a : float ) -> list[list[int | float]]:
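# Competitive-learning update: pull the winning unit j's weight vector toward the
# sample, scaled by the learning rate alpha.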
for i in range(len(__a ) ):
weights[j][i] += alpha * (sample[i] - weights[j][i])
return weights
def lowercase__ ( ) -> None:
"""simple docstring"""
_UpperCamelCase : Optional[int] = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
# weight initialization ( n, C )
_UpperCamelCase : List[str] = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
# training
_UpperCamelCase : List[Any] = SelfOrganizingMap()
_UpperCamelCase : int = 3
_UpperCamelCase : List[Any] = 0.5
for _ in range(lowercase_ ):
for j in range(len(lowercase_ ) ):
# training sample
_UpperCamelCase : int = training_samples[j]
# Compute the winning vector
_UpperCamelCase : Tuple = self_organizing_map.get_winner(lowercase_ ,lowercase_ )
# Update the winning vector
_UpperCamelCase : int = self_organizing_map.update(lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ )
# classify test sample
_UpperCamelCase : Optional[int] = [0, 0, 0, 1]
_UpperCamelCase : Dict = self_organizing_map.get_winner(lowercase_ ,lowercase_ )
# results
print(F'''Clusters that the test sample belongs to : {winner}''' )
print(F'''Weights that have been trained : {weights}''' )
# running the main() function
if __name__ == "__main__":
main()
| 51
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
lowerCamelCase__ = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
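# _LazyModule replaces this module in sys.modules with a proxy, so the heavy
# submodule imports above only happen on first attribute access.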
| 719
|
"""simple docstring"""
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
lowerCamelCase__ = "src/transformers"
lowerCamelCase__ = "docs/source/en"
lowerCamelCase__ = "."
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> List[str]:
"""simple docstring"""
with open(lowercase_ ,"r" ,encoding="utf-8" ,newline="\n" ) as f:
_UpperCamelCase : Union[str, Any] = f.readlines()
# Find the start prompt.
_UpperCamelCase : Dict = 0
while not lines[start_index].startswith(lowercase_ ):
start_index += 1
start_index += 1
_UpperCamelCase : Optional[int] = start_index
while not lines[end_index].startswith(lowercase_ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
lowerCamelCase__ = "Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names.
lowerCamelCase__ = re.compile(R"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
lowerCamelCase__ = re.compile(R"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
lowerCamelCase__ = re.compile(R"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# This is to make sure the transformers module imported is the one in the repo.
lowerCamelCase__ = direct_transformers_import(TRANSFORMERS_PATH)
def lowercase__ ( lowercase_ ) -> Any:
"""simple docstring"""
_UpperCamelCase : Tuple = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)" ,lowercase_ )
return [m.group(0 ) for m in matches]
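# e.g. (illustrative): "TFBertModel" splits into ["TF", "Bert", "Model"].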
def lowercase__ ( lowercase_ ,lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase : str = 2 if text == "✅" or text == "❌" else len(lowercase_ )
_UpperCamelCase : Union[str, Any] = (width - text_length) // 2
_UpperCamelCase : Dict = width - text_length - left_indent
return " " * left_indent + text + " " * right_indent
def lowercase__ ( ) -> str:
"""simple docstring"""
_UpperCamelCase : Optional[Any] = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
_UpperCamelCase : str = {
name: config_maping_names[code]
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if code in config_maping_names
}
_UpperCamelCase : Dict = {name: config.replace("Config" ,"" ) for name, config in model_name_to_config.items()}
# Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
_UpperCamelCase : int = collections.defaultdict(lowercase_ )
_UpperCamelCase : Dict = collections.defaultdict(lowercase_ )
_UpperCamelCase : Dict = collections.defaultdict(lowercase_ )
_UpperCamelCase : int = collections.defaultdict(lowercase_ )
_UpperCamelCase : str = collections.defaultdict(lowercase_ )
# Let's lookup through all transformers object (once).
for attr_name in dir(lowercase_ ):
_UpperCamelCase : List[str] = None
if attr_name.endswith("Tokenizer" ):
_UpperCamelCase : Tuple = slow_tokenizers
_UpperCamelCase : Any = attr_name[:-9]
elif attr_name.endswith("TokenizerFast" ):
_UpperCamelCase : Optional[Any] = fast_tokenizers
_UpperCamelCase : List[str] = attr_name[:-13]
elif _re_tf_models.match(lowercase_ ) is not None:
_UpperCamelCase : List[Any] = tf_models
_UpperCamelCase : Dict = _re_tf_models.match(lowercase_ ).groups()[0]
elif _re_flax_models.match(lowercase_ ) is not None:
_UpperCamelCase : Dict = flax_models
_UpperCamelCase : Union[str, Any] = _re_flax_models.match(lowercase_ ).groups()[0]
elif _re_pt_models.match(lowercase_ ) is not None:
_UpperCamelCase : Optional[int] = pt_models
_UpperCamelCase : Any = _re_pt_models.match(lowercase_ ).groups()[0]
if lookup_dict is not None:
while len(lowercase_ ) > 0:
if attr_name in model_name_to_prefix.values():
_UpperCamelCase : Dict = True
break
# Try again after removing the last word in the name
_UpperCamelCase : List[str] = "".join(camel_case_split(lowercase_ )[:-1] )
# Let's build that table!
_UpperCamelCase : Any = list(model_name_to_config.keys() )
model_names.sort(key=str.lower )
_UpperCamelCase : List[str] = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
# We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
_UpperCamelCase : Union[str, Any] = [len(lowercase_ ) + 2 for c in columns]
_UpperCamelCase : Any = max([len(lowercase_ ) for name in model_names] ) + 2
# Build the table per se
_UpperCamelCase : Tuple = "|" + "|".join([_center_text(lowercase_ ,lowercase_ ) for c, w in zip(lowercase_ ,lowercase_ )] ) + "|\n"
# Use ":-----:" format to center-aligned table cell texts
table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths] ) + "|\n"
_UpperCamelCase : Union[str, Any] = {True: "✅", False: "❌"}
for name in model_names:
_UpperCamelCase : Optional[int] = model_name_to_prefix[name]
_UpperCamelCase : Tuple = [
name,
check[slow_tokenizers[prefix]],
check[fast_tokenizers[prefix]],
check[pt_models[prefix]],
check[tf_models[prefix]],
check[flax_models[prefix]],
]
table += "|" + "|".join([_center_text(lowercase_ ,lowercase_ ) for l, w in zip(lowercase_ ,lowercase_ )] ) + "|\n"
return table
def lowercase__ ( lowercase_=False ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : str = _find_text_in_file(
filename=os.path.join(lowercase_ ,"index.md" ) ,start_prompt="<!--This table is updated automatically from the auto modules" ,end_prompt="<!-- End table-->" ,)
_UpperCamelCase : Any = get_model_table_from_auto_modules()
if current_table != new_table:
if overwrite:
with open(os.path.join(lowercase_ ,"index.md" ) ,"w" ,encoding="utf-8" ,newline="\n" ) as f:
f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
else:
raise ValueError(
"The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this." )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
lowerCamelCase__ = parser.parse_args()
check_model_table(args.fix_and_overwrite)
| 51
| 0
|
"""simple docstring"""
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadVaProcessor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 720
|
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
lowerCamelCase__ = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
lowerCamelCase__ = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
lowerCamelCase__ = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def lowercase__ ( lowercase_ ,lowercase_ ) -> tuple[str, float]:
"""simple docstring"""
_UpperCamelCase : str = len([g for position, g in enumerate(lowercase_ ) if g == main_target[position]] )
return (item, float(lowercase_ ))
def lowercase__ ( lowercase_ ,lowercase_ ) -> tuple[str, str]:
"""simple docstring"""
_UpperCamelCase : Tuple = random.randint(0 ,len(lowercase_ ) - 1 )
_UpperCamelCase : Dict = parent_a[:random_slice] + parent_a[random_slice:]
_UpperCamelCase : Tuple = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def lowercase__ ( lowercase_ ,lowercase_ ) -> str:
"""simple docstring"""
_UpperCamelCase : int = list(lowercase_ )
if random.uniform(0 ,1 ) < MUTATION_PROBABILITY:
_UpperCamelCase : int = random.choice(lowercase_ )
return "".join(lowercase_ )
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,) -> list[str]:
"""simple docstring"""
_UpperCamelCase : Optional[Any] = []
# Generate more children proportionally to the fitness score.
_UpperCamelCase : List[str] = int(parent_a[1] * 100 ) + 1
_UpperCamelCase : Union[str, Any] = 10 if child_n >= 10 else child_n
for _ in range(lowercase_ ):
_UpperCamelCase : Dict = population_score[random.randint(0 ,lowercase_ )][0]
_UpperCamelCase, _UpperCamelCase : Dict = crossover(parent_a[0] ,lowercase_ )
# Append new string to the population list.
pop.append(mutate(lowercase_ ,lowercase_ ) )
pop.append(mutate(lowercase_ ,lowercase_ ) )
return pop
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ = True ) -> tuple[int, int, str]:
"""simple docstring"""
if N_POPULATION < N_SELECTED:
_UpperCamelCase : List[str] = F'''{N_POPULATION} must be bigger than {N_SELECTED}'''
raise ValueError(lowercase_ )
# Verify that the target contains no genes besides the ones inside genes variable.
_UpperCamelCase : int = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
_UpperCamelCase : int = F'''{not_in_genes_list} is not in genes list, evolution cannot converge'''
raise ValueError(lowercase_ )
# Generate random starting population.
_UpperCamelCase : Union[str, Any] = []
for _ in range(lowercase_ ):
population.append("".join([random.choice(lowercase_ ) for i in range(len(lowercase_ ) )] ) )
# Just some logs to know what the algorithm is doing.
_UpperCamelCase, _UpperCamelCase : str = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(lowercase_ )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
_UpperCamelCase : int = [evaluate(lowercase_ ,lowercase_ ) for item in population]
# Check if there is a matching evolution.
_UpperCamelCase : Optional[Any] = sorted(lowercase_ ,key=lambda lowercase_ : x[1] ,reverse=lowercase_ )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F'''\nGeneration: {generation}'''
F'''\nTotal Population:{total_population}'''
F'''\nBest score: {population_score[0][1]}'''
F'''\nBest string: {population_score[0][0]}''' )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoids regression of evolution.
_UpperCamelCase : str = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(lowercase_ )
# Normalize population score to be between 0 and 1.
_UpperCamelCase : str = [
(item, score / len(lowercase_ )) for item, score in population_score
]
# Selection: breed the next generation from the best-scoring individuals.
for i in range(lowercase_ ):
population.extend(select(population_score[int(lowercase_ )] ,lowercase_ ,lowercase_ ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# far fewer generations.
if len(lowercase_ ) > N_POPULATION:
break
if __name__ == "__main__":
lowerCamelCase__ = (
"This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
)
lowerCamelCase__ = list(
" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
"nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
)
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = basic(target_str, genes_list)
print(
f"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"""
)
| 51
| 0
|
"""simple docstring"""
import string
def lowercase__ ( lowercase_ ) -> str:
"""simple docstring"""
_UpperCamelCase : Union[str, Any] = ""
for i in sequence:
_UpperCamelCase : List[str] = ord(i )
if 65 <= extract <= 90:
output += chr(155 - extract )
elif 97 <= extract <= 122:
output += chr(219 - extract )
else:
output += i
return output
def lowercase__ ( lowercase_ ) -> str:
"""simple docstring"""
_UpperCamelCase : Union[str, Any] = string.ascii_letters
_UpperCamelCase : str = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
return "".join(
letters_reversed[letters.index(c )] if c in letters else c for c in sequence )
def lowercase__ ( ) -> None:
"""simple docstring"""
from timeit import timeit
print("Running performance benchmarks..." )
_UpperCamelCase : List[Any] = "from string import printable ; from __main__ import atbash, atbash_slow"
print(F'''> atbash_slow(): {timeit('atbash_slow(printable)' ,setup=lowercase_ )} seconds''' )
print(F'''> atbash(): {timeit('atbash(printable)' ,setup=lowercase_ )} seconds''' )
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(f"""{example} encrypted in atbash: {atbash(example)}""")
benchmark()
| 721
|
"""simple docstring"""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = ["model.decoder.embed_positions.weights"]
def lowercase__ ( lowercase_ ) -> Optional[Any]:
"""simple docstring"""
if "emb" in name:
_UpperCamelCase : List[str] = name.replace("emb" ,"model.decoder.embed_tokens" )
if "transformer" in name:
_UpperCamelCase : Optional[int] = name.replace("transformer" ,"model.decoder" )
if "cross_attention" in name:
_UpperCamelCase : Optional[int] = name.replace("cross_attention" ,"encoder_attn" )
if "linear1" in name:
_UpperCamelCase : Optional[Any] = name.replace("linear1" ,"fc1" )
if "linear2" in name:
_UpperCamelCase : Union[str, Any] = name.replace("linear2" ,"fc2" )
if "norm1" in name:
_UpperCamelCase : Optional[Any] = name.replace("norm1" ,"self_attn_layer_norm" )
if "norm_cross" in name:
_UpperCamelCase : Dict = name.replace("norm_cross" ,"encoder_attn_layer_norm" )
if "norm2" in name:
_UpperCamelCase : Union[str, Any] = name.replace("norm2" ,"final_layer_norm" )
if "out_norm" in name:
_UpperCamelCase : Union[str, Any] = name.replace("out_norm" ,"model.decoder.layer_norm" )
if "linears" in name:
_UpperCamelCase : List[str] = name.replace("linears" ,"lm_heads" )
if "condition_provider.conditioners.description.output_proj" in name:
_UpperCamelCase : Any = name.replace("condition_provider.conditioners.description.output_proj" ,"enc_to_dec_proj" )
return name
def lowercase__ ( lowercase_ ,lowercase_ ) -> Tuple[Dict, Dict]:
"""simple docstring"""
_UpperCamelCase : str = list(state_dict.keys() )
_UpperCamelCase : Optional[Any] = {}
for key in keys:
_UpperCamelCase : Optional[int] = state_dict.pop(lowercase_ )
_UpperCamelCase : List[Any] = rename_keys(lowercase_ )
if "in_proj_weight" in key:
# split fused qkv proj
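# the fused in_proj weight stacks the query, key and value projections row-wise,
# so the three hidden_size-row blocks below are split out in that order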
_UpperCamelCase : Tuple = val[:hidden_size, :]
_UpperCamelCase : Optional[Any] = val[hidden_size : 2 * hidden_size, :]
_UpperCamelCase : Optional[Any] = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
_UpperCamelCase : Optional[Any] = val
else:
_UpperCamelCase : List[str] = val
return state_dict, enc_dec_proj_state_dict
def lowercase__ ( lowercase_ ) -> MusicgenDecoderConfig:
"""simple docstring"""
if checkpoint == "small":
# default config values
_UpperCamelCase : List[Any] = 1_024
_UpperCamelCase : List[str] = 24
_UpperCamelCase : Any = 16
elif checkpoint == "medium":
_UpperCamelCase : Tuple = 1_536
_UpperCamelCase : Dict = 48
_UpperCamelCase : Tuple = 24
elif checkpoint == "large":
_UpperCamelCase : int = 2_048
_UpperCamelCase : Optional[int] = 48
_UpperCamelCase : Dict = 32
else:
raise ValueError(F'''Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.''' )
_UpperCamelCase : str = MusicgenDecoderConfig(
hidden_size=lowercase_ ,ffn_dim=hidden_size * 4 ,num_hidden_layers=lowercase_ ,num_attention_heads=lowercase_ ,)
return config
@torch.no_grad()
def lowercase__ ( lowercase_ ,lowercase_=None ,lowercase_=None ,lowercase_="cpu" ) -> List[str]:
"""simple docstring"""
_UpperCamelCase : str = MusicGen.get_pretrained(lowercase_ ,device=lowercase_ )
_UpperCamelCase : Union[str, Any] = decoder_config_from_checkpoint(lowercase_ )
_UpperCamelCase : Optional[int] = fairseq_model.lm.state_dict()
_UpperCamelCase, _UpperCamelCase : Optional[Any] = rename_state_dict(
lowercase_ ,hidden_size=decoder_config.hidden_size )
_UpperCamelCase : Tuple = TaEncoderModel.from_pretrained("t5-base" )
_UpperCamelCase : Union[str, Any] = EncodecModel.from_pretrained("facebook/encodec_32khz" )
_UpperCamelCase : str = MusicgenForCausalLM(lowercase_ ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
_UpperCamelCase, _UpperCamelCase : str = decoder.load_state_dict(lowercase_ ,strict=lowercase_ )
for key in missing_keys.copy():
if key.startswith(("text_encoder", "audio_encoder") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(lowercase_ )
if len(lowercase_ ) > 0:
raise ValueError(F'''Missing key(s) in state_dict: {missing_keys}''' )
if len(lowercase_ ) > 0:
raise ValueError(F'''Unexpected key(s) in state_dict: {unexpected_keys}''' )
# init the composite model
_UpperCamelCase : str = MusicgenForConditionalGeneration(text_encoder=lowercase_ ,audio_encoder=lowercase_ ,decoder=lowercase_ )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(lowercase_ )
# check we can do a forward pass
_UpperCamelCase : List[str] = torch.arange(0 ,8 ,dtype=torch.long ).reshape(2 ,-1 )
_UpperCamelCase : Dict = input_ids.reshape(2 * 4 ,-1 )
with torch.no_grad():
_UpperCamelCase : Tuple = model(input_ids=lowercase_ ,decoder_input_ids=lowercase_ ).logits
if logits.shape != (8, 1, 2_048):
raise ValueError("Incorrect shape for logits" )
# now construct the processor
_UpperCamelCase : int = AutoTokenizer.from_pretrained("t5-base" )
_UpperCamelCase : str = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz" ,padding_side="left" )
_UpperCamelCase : Optional[int] = MusicgenProcessor(feature_extractor=lowercase_ ,tokenizer=lowercase_ )
# set the appropriate bos/pad token ids
_UpperCamelCase : str = 2_048
_UpperCamelCase : str = 2_048
# set other default generation config params
_UpperCamelCase : Optional[Any] = int(30 * audio_encoder.config.frame_rate )
_UpperCamelCase : List[str] = True
_UpperCamelCase : int = 3.0
if pytorch_dump_folder is not None:
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
logger.info(F'''Saving model {checkpoint} to {pytorch_dump_folder}''' )
model.save_pretrained(lowercase_ )
processor.save_pretrained(lowercase_ )
if repo_id:
logger.info(F'''Pushing model {checkpoint} to {repo_id}''' )
model.push_to_hub(lowercase_ )
processor.push_to_hub(lowercase_ )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
lowerCamelCase__ = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 51
| 0
|
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
lowerCamelCase__ = TypeVar("KEY")
lowerCamelCase__ = TypeVar("VAL")
@dataclass(frozen=_UpperCamelCase , slots=_UpperCamelCase )
class __SCREAMING_SNAKE_CASE ( Generic[KEY, VAL] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :KEY
SCREAMING_SNAKE_CASE__ :VAL
class __SCREAMING_SNAKE_CASE ( _Item ):
'''simple docstring'''
def __init__( self : List[str] ) -> None:
super().__init__(__a , __a )
def __bool__( self : Dict ) -> bool:
return False
lowerCamelCase__ = _DeletedItem()
class __SCREAMING_SNAKE_CASE ( MutableMapping[KEY, VAL] ):
'''simple docstring'''
def __init__( self : int , __a : int = 8 , __a : float = 0.75 ) -> None:
_UpperCamelCase : str = initial_block_size
_UpperCamelCase : list[_Item | None] = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
_UpperCamelCase : List[str] = capacity_factor
_UpperCamelCase : Dict = 0
def __SCREAMING_SNAKE_CASE ( self : int , __a : KEY ) -> int:
return hash(__a ) % len(self._buckets )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : int ) -> int:
return (ind + 1) % len(self._buckets )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : int , __a : KEY , __a : VAL ) -> bool:
_UpperCamelCase : List[Any] = self._buckets[ind]
if not stored:
_UpperCamelCase : Tuple = _Item(__a , __a )
self._len += 1
return True
elif stored.key == key:
_UpperCamelCase : Union[str, Any] = _Item(__a , __a )
return True
else:
return False
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> bool:
_UpperCamelCase : Any = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(__a )
def __SCREAMING_SNAKE_CASE ( self : str ) -> bool:
if len(self._buckets ) <= self._initial_block_size:
return False
_UpperCamelCase : List[str] = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : int ) -> None:
_UpperCamelCase : Any = self._buckets
_UpperCamelCase : List[Any] = [None] * new_size
_UpperCamelCase : List[str] = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def __SCREAMING_SNAKE_CASE ( self : int ) -> None:
self._resize(len(self._buckets ) * 2 )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> None:
self._resize(len(self._buckets ) // 2 )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : KEY ) -> Iterator[int]:
_UpperCamelCase : str = self._get_bucket_index(__a )
for _ in range(len(self._buckets ) ):
yield ind
_UpperCamelCase : Tuple = self._get_next_ind(__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : KEY , __a : VAL ) -> None:
for ind in self._iterate_buckets(__a ):
if self._try_set(__a , __a , __a ):
break
def __setitem__( self : int , __a : KEY , __a : VAL ) -> None:
if self._is_full():
self._size_up()
self._add_item(__a , __a )
def __delitem__( self : str , __a : KEY ) -> None:
for ind in self._iterate_buckets(__a ):
_UpperCamelCase : Tuple = self._buckets[ind]
if item is None:
raise KeyError(__a )
if item is _deleted:
continue
if item.key == key:
_UpperCamelCase : List[Any] = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self : str , __a : KEY ) -> VAL:
for ind in self._iterate_buckets(__a ):
_UpperCamelCase : Tuple = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(__a )
def __len__( self : List[Any] ) -> int:
return self._len
def __iter__( self : List[str] ) -> Iterator[KEY]:
yield from (item.key for item in self._buckets if item)
def __repr__( self : List[str] ) -> str:
_UpperCamelCase : Optional[int] = " ,".join(
F'''{item.key}: {item.val}''' for item in self._buckets if item )
return F'''HashMap({val_string})'''
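# Minimal usage sketch for the open-addressing map above (the class name is
# masked in this dump, so `HashMap` below is an assumed alias):
#
#   hm = HashMap()        # default block size 8, capacity factor 0.75
#   hm["a"] = 1
#   assert hm["a"] == 1 and len(hm) == 1
#   del hm["a"]           # replaced by the _deleted sentinel, may trigger a shrink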
| 700
|
"""simple docstring"""
from datetime import datetime
import requests
from bsa import BeautifulSoup
if __name__ == "__main__":
lowerCamelCase__ = input("Enter image url: ").strip()
print(f"""Downloading image from {url} ...""")
lowerCamelCase__ = BeautifulSoup(requests.get(url).content, "html.parser")
# The image URL is in the content field of the first meta tag with property og:image
lowerCamelCase__ = soup.find("meta", {"property": "og:image"})["content"]
lowerCamelCase__ = requests.get(image_url).content
lowerCamelCase__ = f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"""
with open(file_name, "wb") as fp:
fp.write(image_data)
print(f"""Done. Image saved to disk as {file_name}.""")
| 51
| 0
|
"""simple docstring"""
from __future__ import annotations
from collections import Counter
from random import random
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Any ) -> Union[str, Any]:
_UpperCamelCase : int = {}
def __SCREAMING_SNAKE_CASE ( self : int , __a : str ) -> None:
_UpperCamelCase : Union[str, Any] = {}
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : str , __a : str , __a : float ) -> None:
if nodea not in self.connections:
self.add_node(__a )
if nodea not in self.connections:
self.add_node(__a )
_UpperCamelCase : Dict = probability
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> list[str]:
return list(self.connections )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : str ) -> str:
_UpperCamelCase : Union[str, Any] = 0
_UpperCamelCase : List[str] = random()
for dest in self.connections[node]:
current_probability += self.connections[node][dest]
if current_probability > random_value:
return dest
return ""
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> dict[str, int]:
"""simple docstring"""
_UpperCamelCase : Tuple = MarkovChainGraphUndirectedUnweighted()
for nodea, nodea, probability in transitions:
graph.add_transition_probability(lowercase_ ,lowercase_ ,lowercase_ )
_UpperCamelCase : Dict = Counter(graph.get_nodes() )
_UpperCamelCase : Union[str, Any] = start
for _ in range(lowercase_ ):
_UpperCamelCase : List[str] = graph.transition(lowercase_ )
visited[node] += 1
return visited
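# Example sketch (hypothetical data): `transitions` is a list of
# (source, destination, probability) triples; for `transition` to always
# return a node, each source's outgoing probabilities should sum to 1.
#
#   transitions = [("a", "a", 0.9), ("a", "b", 0.1),
#                  ("b", "a", 0.5), ("b", "b", 0.5)]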
if __name__ == "__main__":
import doctest
doctest.testmod()
| 701
|
"""simple docstring"""
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(".")
def lowercase__ ( lowercase_ ) -> int:
"""simple docstring"""
_UpperCamelCase : Any = test_file.split(os.path.sep )
if components[0:2] != ["tests", "models"]:
raise ValueError(
"`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
F'''{test_file} instead.''' )
_UpperCamelCase : str = components[-1]
if not test_fn.endswith("py" ):
raise ValueError(F'''`test_file` should be a python file. Got {test_fn} instead.''' )
if not test_fn.startswith("test_modeling_" ):
raise ValueError(
F'''`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.''' )
_UpperCamelCase : Dict = components[:-1] + [test_fn.replace(".py" ,"" )]
_UpperCamelCase : List[str] = ".".join(lowercase_ )
return test_module_path
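# e.g. "tests/models/bert/test_modeling_bert.py"
#   -> "tests.models.bert.test_modeling_bert"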
def lowercase__ ( lowercase_ ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase : Optional[Any] = get_module_path(lowercase_ )
_UpperCamelCase : str = importlib.import_module(lowercase_ )
return test_module
def lowercase__ ( lowercase_ ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase : Union[str, Any] = []
_UpperCamelCase : List[Any] = get_test_module(lowercase_ )
for attr in dir(lowercase_ ):
if attr.endswith("ModelTester" ):
tester_classes.append(getattr(lowercase_ ,lowercase_ ) )
# sort with class names
return sorted(lowercase_ ,key=lambda lowercase_ : x.__name__ )
def lowercase__ ( lowercase_ ) -> Tuple:
"""simple docstring"""
_UpperCamelCase : Optional[Any] = []
_UpperCamelCase : Any = get_test_module(lowercase_ )
for attr in dir(lowercase_ ):
_UpperCamelCase : int = getattr(lowercase_ ,lowercase_ )
# (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
# `all_model_classes` is not empty (which also excludes other special classes).
_UpperCamelCase : Optional[Any] = getattr(lowercase_ ,"all_model_classes" ,[] )
if len(lowercase_ ) > 0:
test_classes.append(lowercase_ )
# sort with class names
return sorted(lowercase_ ,key=lambda lowercase_ : x.__name__ )
def lowercase__ ( lowercase_ ) -> Any:
"""simple docstring"""
_UpperCamelCase : Dict = get_test_classes(lowercase_ )
_UpperCamelCase : int = set()
for test_class in test_classes:
model_classes.update(test_class.all_model_classes )
# sort with class names
return sorted(lowercase_ ,key=lambda lowercase_ : x.__name__ )
def lowercase__ ( lowercase_ ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase : List[str] = test_class()
if hasattr(lowercase_ ,"setUp" ):
test.setUp()
_UpperCamelCase : Tuple = None
if hasattr(lowercase_ ,"model_tester" ):
# `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
if test.model_tester is not None:
_UpperCamelCase : Tuple = test.model_tester.__class__
return model_tester
def lowercase__ ( lowercase_ ,lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase : str = get_test_classes(lowercase_ )
_UpperCamelCase : Dict = []
for test_class in test_classes:
if model_class in test_class.all_model_classes:
target_test_classes.append(lowercase_ )
# sort with class names
return sorted(lowercase_ ,key=lambda lowercase_ : x.__name__ )
def lowercase__ ( lowercase_ ,lowercase_ ) -> Dict:
"""simple docstring"""
_UpperCamelCase : Any = get_test_classes_for_model(lowercase_ ,lowercase_ )
_UpperCamelCase : List[Any] = []
for test_class in test_classes:
_UpperCamelCase : List[Any] = get_model_tester_from_test_class(lowercase_ )
if tester_class is not None:
tester_classes.append(lowercase_ )
# sort with class names
return sorted(lowercase_ ,key=lambda lowercase_ : x.__name__ )
def lowercase__ ( lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase : Any = get_test_classes(lowercase_ )
_UpperCamelCase : Tuple = {test_class: get_model_tester_from_test_class(lowercase_ ) for test_class in test_classes}
return test_tester_mapping
def lowercase__ ( lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase : List[Any] = get_model_classes(lowercase_ )
_UpperCamelCase : Optional[int] = {
model_class: get_test_classes_for_model(lowercase_ ,lowercase_ ) for model_class in model_classes
}
return model_test_mapping
def lowercase__ ( lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase : Optional[Any] = get_model_classes(lowercase_ )
_UpperCamelCase : Tuple = {
model_class: get_tester_classes_for_model(lowercase_ ,lowercase_ ) for model_class in model_classes
}
return model_to_tester_mapping
def lowercase__ ( lowercase_ ) -> Optional[int]:
"""simple docstring"""
if isinstance(lowercase_ ,lowercase_ ):
return o
elif isinstance(lowercase_ ,lowercase_ ):
return o.__name__
elif isinstance(lowercase_ ,(list, tuple) ):
return [to_json(lowercase_ ) for x in o]
elif isinstance(lowercase_ ,lowercase_ ):
return {to_json(lowercase_ ): to_json(lowercase_ ) for k, v in o.items()}
else:
return o
| 51
| 0
|
def lowercase__ ( lowercase_ ) -> int:
"""simple docstring"""
_UpperCamelCase : Union[str, Any] = [1]
_UpperCamelCase : List[str] = 0, 0, 0
_UpperCamelCase : str = ugly_nums[ia] * 2
_UpperCamelCase : int = ugly_nums[ia] * 3
_UpperCamelCase : Tuple = ugly_nums[ia] * 5
for _ in range(1 ,lowercase_ ):
_UpperCamelCase : Optional[int] = min(lowercase_ ,lowercase_ ,lowercase_ )
ugly_nums.append(lowercase_ )
if next_num == next_a:
ia += 1
_UpperCamelCase : List[str] = ugly_nums[ia] * 2
if next_num == next_a:
ia += 1
_UpperCamelCase : Any = ugly_nums[ia] * 3
if next_num == next_a:
ia += 1
_UpperCamelCase : str = ugly_nums[ia] * 5
return ugly_nums[-1]
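# Classic three-pointer ugly-number generation: candidates 2*x, 3*y and 5*z are
# tracked over the already-generated values and the minimum is appended each
# step, yielding 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, ...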
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f"""{ugly_numbers(200) = }""")
| 702
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCamelCase__ = {
"configuration_mask2former": [
"MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Mask2FormerConfig",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["Mask2FormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"Mask2FormerForUniversalSegmentation",
"Mask2FormerModel",
"Mask2FormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_maskaformer import MaskaFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskaformer import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskaFormerForUniversalSegmentation,
MaskaFormerModel,
MaskaFormerPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure)
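# The _LazyModule indirection above defers the heavy torch / vision imports
# until an attribute is first accessed, keeping `import transformers` cheap.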
| 51
| 0
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCamelCase__ = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
SCREAMING_SNAKE_CASE__ :str = ["pixel_values"]
def __init__( self : int , __a : bool = True , __a : Dict[str, int] = None , __a : PILImageResampling = PILImageResampling.BICUBIC , __a : bool = True , __a : Union[int, float] = 1 / 255 , __a : bool = True , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , __a : bool = True , **__a : str , ) -> None:
super().__init__(**__a )
_UpperCamelCase : Optional[Any] = size if size is not None else {"height": 384, "width": 384}
_UpperCamelCase : str = get_size_dict(__a , default_to_square=__a )
_UpperCamelCase : Optional[Any] = do_resize
_UpperCamelCase : List[str] = size
_UpperCamelCase : Dict = resample
_UpperCamelCase : List[str] = do_rescale
_UpperCamelCase : str = rescale_factor
_UpperCamelCase : Tuple = do_normalize
_UpperCamelCase : Any = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
_UpperCamelCase : str = image_std if image_std is not None else OPENAI_CLIP_STD
_UpperCamelCase : str = do_convert_rgb
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : np.ndarray , __a : Dict[str, int] , __a : PILImageResampling = PILImageResampling.BICUBIC , __a : Optional[Union[str, ChannelDimension]] = None , **__a : List[str] , ) -> np.ndarray:
_UpperCamelCase : int = get_size_dict(__a , default_to_square=__a )
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}''' )
_UpperCamelCase : str = (size["height"], size["width"])
return resize(__a , size=__a , resample=__a , data_format=__a , **__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : np.ndarray , __a : Union[int, float] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : List[Any] , ) -> List[str]:
return rescale(__a , scale=__a , data_format=__a , **__a )
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : np.ndarray , __a : Union[float, List[float]] , __a : Union[float, List[float]] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Tuple , ) -> np.ndarray:
return normalize(__a , mean=__a , std=__a , data_format=__a , **__a )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : ImageInput , __a : Optional[bool] = None , __a : Optional[Dict[str, int]] = None , __a : PILImageResampling = None , __a : Optional[bool] = None , __a : Optional[float] = None , __a : Optional[bool] = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[str, TensorType]] = None , __a : bool = None , __a : ChannelDimension = ChannelDimension.FIRST , **__a : Tuple , ) -> PIL.Image.Image:
_UpperCamelCase : Optional[int] = do_resize if do_resize is not None else self.do_resize
_UpperCamelCase : Dict = resample if resample is not None else self.resample
_UpperCamelCase : Dict = do_rescale if do_rescale is not None else self.do_rescale
_UpperCamelCase : Optional[int] = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCamelCase : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize
_UpperCamelCase : int = image_mean if image_mean is not None else self.image_mean
_UpperCamelCase : List[Any] = image_std if image_std is not None else self.image_std
_UpperCamelCase : str = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
_UpperCamelCase : Union[str, Any] = size if size is not None else self.size
_UpperCamelCase : Union[str, Any] = get_size_dict(__a , default_to_square=__a )
_UpperCamelCase : List[Any] = make_list_of_images(__a )
if not valid_images(__a ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
_UpperCamelCase : List[Any] = [convert_to_rgb(__a ) for image in images]
# All transformations expect numpy arrays.
_UpperCamelCase : List[str] = [to_numpy_array(__a ) for image in images]
if do_resize:
_UpperCamelCase : List[str] = [self.resize(image=__a , size=__a , resample=__a ) for image in images]
if do_rescale:
_UpperCamelCase : int = [self.rescale(image=__a , scale=__a ) for image in images]
if do_normalize:
_UpperCamelCase : int = [self.normalize(image=__a , mean=__a , std=__a ) for image in images]
_UpperCamelCase : Union[str, Any] = [to_channel_dimension_format(__a , __a ) for image in images]
_UpperCamelCase : Union[str, Any] = BatchFeature(data={"pixel_values": images} , tensor_type=__a )
return encoded_outputs
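# Minimal usage sketch (the processor class name is masked in this dump, and a
# PIL input is assumed):
#
#   processor = <the BaseImageProcessor subclass above>()
#   batch = processor(images=[pil_image], return_tensors="np")
#   batch["pixel_values"].shape   # -> (1, 3, 384, 384) with the default size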
| 703
|
"""simple docstring"""
lowerCamelCase__ = "\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
lowerCamelCase__ = [{"type": "code", "content": INSTALL_CONTENT}]
lowerCamelCase__ = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 51
| 0
|
"""simple docstring"""
from math import isqrt
def lowercase__ ( lowercase_ ) -> bool:
"""simple docstring"""
return all(number % divisor != 0 for divisor in range(2 ,isqrt(lowercase_ ) + 1 ) )
def lowercase__ ( lowercase_ = 10**6 ) -> int:
"""simple docstring"""
_UpperCamelCase : Optional[Any] = 0
_UpperCamelCase : Tuple = 1
_UpperCamelCase : Optional[Any] = 7
while prime_candidate < max_prime:
primes_count += is_prime(lowercase_ )
cube_index += 1
prime_candidate += 6 * cube_index
return primes_count
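# `prime_candidate` enumerates differences of consecutive cubes:
# (n + 1)**3 - n**3 = 3*n**2 + 3*n + 1 -> 7, 19, 37, 61, ... (gaps 12, 18, 24),
# so the function counts cube-gap primes below `max_prime`.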
if __name__ == "__main__":
print(F"""{solution() = }""")
| 704
|
"""simple docstring"""
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
_UpperCamelCase : Tuple = tempfile.mkdtemp()
_UpperCamelCase : str = 5
# Realm tok
_UpperCamelCase : Tuple = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"test",
"question",
"this",
"is",
"the",
"first",
"second",
"third",
"fourth",
"fifth",
"record",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
_UpperCamelCase : Optional[int] = os.path.join(self.tmpdirname , "realm_tokenizer" )
os.makedirs(__a , exist_ok=__a )
_UpperCamelCase : Optional[Any] = os.path.join(__a , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
_UpperCamelCase : Optional[int] = os.path.join(self.tmpdirname , "realm_block_records" )
os.makedirs(__a , exist_ok=__a )
def __SCREAMING_SNAKE_CASE ( self : str ) -> RealmTokenizer:
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , "realm_tokenizer" ) )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict:
shutil.rmtree(self.tmpdirname )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
_UpperCamelCase : Optional[Any] = RealmConfig(num_block_records=self.num_block_records )
return config
def __SCREAMING_SNAKE_CASE ( self : int ) -> int:
_UpperCamelCase : Any = Dataset.from_dict(
{
"id": ["0", "1"],
"question": ["foo", "bar"],
"answers": [["Foo", "Bar"], ["Bar"]],
} )
return dataset
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
_UpperCamelCase : int = np.array(
[
b"This is the first record",
b"This is the second record",
b"This is the third record",
b"This is the fourth record",
b"This is the fifth record",
b"This is a longer longer longer record",
] , dtype=__a , )
return block_records
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
_UpperCamelCase : List[str] = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
_UpperCamelCase : Tuple = self.get_config()
_UpperCamelCase : int = self.get_dummy_retriever()
_UpperCamelCase : Tuple = retriever.tokenizer
_UpperCamelCase : List[str] = np.array([0, 3] , dtype="long" )
_UpperCamelCase : Union[str, Any] = tokenizer(["Test question"] ).input_ids
_UpperCamelCase : List[str] = tokenizer(
["the fourth"] , add_special_tokens=__a , return_token_type_ids=__a , return_attention_mask=__a , ).input_ids
_UpperCamelCase : str = config.reader_seq_len
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : List[Any] = retriever(
__a , __a , answer_ids=__a , max_length=__a , return_tensors="np" )
self.assertEqual(len(__a ) , 2 )
self.assertEqual(len(__a ) , 2 )
self.assertEqual(len(__a ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"] , )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
_UpperCamelCase : Any = self.get_config()
_UpperCamelCase : Dict = self.get_dummy_retriever()
_UpperCamelCase : Dict = retriever.tokenizer
_UpperCamelCase : List[Any] = np.array([0, 3, 5] , dtype="long" )
_UpperCamelCase : Optional[int] = tokenizer(["Test question"] ).input_ids
_UpperCamelCase : str = tokenizer(
["the fourth", "longer longer"] , add_special_tokens=__a , return_token_type_ids=__a , return_attention_mask=__a , ).input_ids
_UpperCamelCase : Union[str, Any] = config.reader_seq_len
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Optional[Any] = retriever(
__a , __a , answer_ids=__a , max_length=__a , return_tensors="np" )
self.assertEqual([False, True, True] , __a )
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , __a )
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , __a )
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
_UpperCamelCase : List[Any] = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) )
# Test local path
_UpperCamelCase : int = retriever.from_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) )
self.assertEqual(retriever.block_records[0] , b"This is the first record" )
# Test mocked remote path
with patch("transformers.models.realm.retrieval_realm.hf_hub_download" ) as mock_hf_hub_download:
_UpperCamelCase : List[Any] = os.path.join(
os.path.join(self.tmpdirname , "realm_block_records" ) , _REALM_BLOCK_RECORDS_FILENAME )
_UpperCamelCase : int = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa" )
self.assertEqual(retriever.block_records[0] , b"This is the first record" )
| 51
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase__ = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 705
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Any = LEDConfig
SCREAMING_SNAKE_CASE__ :str = {}
SCREAMING_SNAKE_CASE__ :List[str] = "gelu"
def __init__( self : List[Any] , __a : Union[str, Any] , __a : List[Any]=13 , __a : int=7 , __a : str=True , __a : Any=False , __a : str=99 , __a : str=32 , __a : Union[str, Any]=2 , __a : Optional[Any]=4 , __a : List[Any]=37 , __a : List[Any]=0.1 , __a : Tuple=0.1 , __a : Dict=20 , __a : str=2 , __a : Dict=1 , __a : Any=0 , __a : List[Any]=4 , ) -> List[Any]:
_UpperCamelCase : Optional[Any] = parent
_UpperCamelCase : List[str] = batch_size
_UpperCamelCase : str = seq_length
_UpperCamelCase : str = is_training
_UpperCamelCase : Any = use_labels
_UpperCamelCase : Any = vocab_size
_UpperCamelCase : List[str] = hidden_size
_UpperCamelCase : Optional[Any] = num_hidden_layers
_UpperCamelCase : Dict = num_attention_heads
_UpperCamelCase : Optional[Any] = intermediate_size
_UpperCamelCase : int = hidden_dropout_prob
_UpperCamelCase : Dict = attention_probs_dropout_prob
_UpperCamelCase : str = max_position_embeddings
_UpperCamelCase : int = eos_token_id
_UpperCamelCase : Dict = pad_token_id
_UpperCamelCase : Optional[Any] = bos_token_id
_UpperCamelCase : str = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
_UpperCamelCase : List[str] = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
_UpperCamelCase : int = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
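        # i.e. `seq_length` rounded up to the nearest multiple of `attention_window`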
def __SCREAMING_SNAKE_CASE ( self : int ) -> str:
_UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_UpperCamelCase : Optional[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_UpperCamelCase : Tuple = tf.concat([input_ids, eos_tensor] , axis=1 )
_UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase : List[Any] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
_UpperCamelCase : Dict = prepare_led_inputs_dict(__a , __a , __a )
_UpperCamelCase : Union[str, Any] = tf.concat(
[tf.zeros_like(__a )[:, :-1], tf.ones_like(__a )[:, -1:]] , axis=-1 , )
_UpperCamelCase : Union[str, Any] = global_attention_mask
return config, inputs_dict
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : List[Any] , __a : int ) -> Tuple:
_UpperCamelCase : Tuple = TFLEDModel(config=__a ).get_decoder()
_UpperCamelCase : Tuple = inputs_dict["input_ids"]
_UpperCamelCase : int = input_ids[:1, :]
_UpperCamelCase : List[str] = inputs_dict["attention_mask"][:1, :]
_UpperCamelCase : List[Any] = 1
# first forward pass
_UpperCamelCase : Any = model(__a , attention_mask=__a , use_cache=__a )
_UpperCamelCase, _UpperCamelCase : Union[str, Any] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_UpperCamelCase : Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
_UpperCamelCase : Any = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
_UpperCamelCase : List[str] = tf.concat([input_ids, next_tokens] , axis=-1 )
_UpperCamelCase : Union[str, Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_UpperCamelCase : Tuple = model(__a , attention_mask=__a )[0]
_UpperCamelCase : int = model(__a , attention_mask=__a , past_key_values=__a )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_UpperCamelCase : List[Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_UpperCamelCase : List[str] = output_from_no_past[:, -3:, random_slice_idx]
_UpperCamelCase : Optional[int] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__a , __a , rtol=1e-3 )
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_=None ,lowercase_=None ,lowercase_=None ,lowercase_=None ,) -> Dict:
"""simple docstring"""
if attention_mask is None:
_UpperCamelCase : str = tf.cast(tf.math.not_equal(lowercase_ ,config.pad_token_id ) ,tf.inta )
if decoder_attention_mask is None:
_UpperCamelCase : str = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape ,dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] ,config.pad_token_id ) ,tf.inta ),
] ,axis=-1 ,)
if head_mask is None:
_UpperCamelCase : List[str] = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_UpperCamelCase : List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
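# Defaults above: `attention_mask` masks out pad tokens, while the decoder mask
# additionally always attends to the first (decoder start) position.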
@require_tf
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Any = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
SCREAMING_SNAKE_CASE__ :List[str] = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
SCREAMING_SNAKE_CASE__ :List[str] = (
{
"conversational": TFLEDForConditionalGeneration,
"feature-extraction": TFLEDModel,
"summarization": TFLEDForConditionalGeneration,
"text2text-generation": TFLEDForConditionalGeneration,
"translation": TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE__ :Tuple = True
SCREAMING_SNAKE_CASE__ :str = False
SCREAMING_SNAKE_CASE__ :Optional[Any] = False
SCREAMING_SNAKE_CASE__ :int = False
def __SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
_UpperCamelCase : int = TFLEDModelTester(self )
_UpperCamelCase : Any = ConfigTester(self , config_class=__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
self.config_tester.run_common_tests()
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
_UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__a )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
_UpperCamelCase, _UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase : Optional[int] = tf.zeros_like(inputs_dict["attention_mask"] )
_UpperCamelCase : Union[str, Any] = 2
_UpperCamelCase : str = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["global_attention_mask"] , )
_UpperCamelCase : Dict = True
_UpperCamelCase : str = self.model_tester.seq_length
_UpperCamelCase : Union[str, Any] = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(__a : Optional[int] ):
_UpperCamelCase : Optional[int] = outputs.decoder_attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(__a : Optional[Any] ):
_UpperCamelCase : Union[str, Any] = [t.numpy() for t in outputs.encoder_attentions]
_UpperCamelCase : List[Any] = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
_UpperCamelCase : Dict = True
_UpperCamelCase : Optional[Any] = False
_UpperCamelCase : int = False
_UpperCamelCase : Optional[int] = model_class(__a )
_UpperCamelCase : int = model(self._prepare_for_class(__a , __a ) )
_UpperCamelCase : Any = len(__a )
self.assertEqual(config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
if self.is_encoder_decoder:
_UpperCamelCase : Optional[Any] = model_class(__a )
_UpperCamelCase : List[Any] = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(config.output_hidden_states , __a )
check_decoder_attentions_output(__a )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
_UpperCamelCase : int = True
_UpperCamelCase : Tuple = model_class(__a )
_UpperCamelCase : str = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
# Check attention is always last and order is fine
_UpperCamelCase : Any = True
_UpperCamelCase : List[str] = True
_UpperCamelCase : Tuple = model_class(__a )
_UpperCamelCase : int = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__a ) )
self.assertEqual(model.config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
@unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing." )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
pass
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple:
        # TODO: Head-masking not yet implemented
pass
def lowercase__ ( lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
return tf.constant(lowercase_ ,dtype=tf.intaa )
lowerCamelCase__ = 1E-4
@slow
@require_tf
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
_UpperCamelCase : Any = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" ).led
# change to intended input here
_UpperCamelCase : int = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_UpperCamelCase : Tuple = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_UpperCamelCase : Optional[Any] = prepare_led_inputs_dict(model.config , __a , __a )
_UpperCamelCase : Optional[int] = model(**__a )[0]
_UpperCamelCase : Optional[int] = (1, 1024, 768)
self.assertEqual(output.shape , __a )
# change to expected output here
_UpperCamelCase : Tuple = tf.convert_to_tensor(
[[2.30_50, 2.82_79, 0.65_31], [-1.84_57, -0.14_55, -3.56_61], [-1.01_86, 0.45_86, -2.20_43]] , )
tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1e-3 )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
_UpperCamelCase : Optional[int] = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" )
# change to intended input here
_UpperCamelCase : Optional[int] = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_UpperCamelCase : List[str] = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_UpperCamelCase : Optional[Any] = prepare_led_inputs_dict(model.config , __a , __a )
_UpperCamelCase : Union[str, Any] = model(**__a )[0]
_UpperCamelCase : int = (1, 1024, model.config.vocab_size)
self.assertEqual(output.shape , __a )
# change to expected output here
_UpperCamelCase : Optional[int] = tf.convert_to_tensor(
[[33.65_07, 6.45_72, 16.80_89], [5.87_39, -2.42_38, 11.29_02], [-3.21_39, -4.31_49, 4.27_83]] , )
tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1e-3 , rtol=1e-3 )
| 51
| 0
|
"""simple docstring"""
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
lowerCamelCase__ = logging.getLogger(__name__)
lowerCamelCase__ = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
lowerCamelCase__ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Optional[str] = field(
default=_UpperCamelCase , metadata={
"help": (
"The model checkpoint for weights initialization. Leave None if you want to train a model from"
" scratch."
)
} , )
SCREAMING_SNAKE_CASE__ :Optional[str] = field(
default=_UpperCamelCase , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(_UpperCamelCase )} , )
SCREAMING_SNAKE_CASE__ :Optional[str] = field(
default=_UpperCamelCase , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
SCREAMING_SNAKE_CASE__ :Optional[str] = field(
default=_UpperCamelCase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
SCREAMING_SNAKE_CASE__ :Optional[str] = field(
default=_UpperCamelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Optional[str] = field(
default=_UpperCamelCase , metadata={"help": "The input training data file (a text file)."} )
SCREAMING_SNAKE_CASE__ :Optional[str] = field(
default=_UpperCamelCase , metadata={
"help": (
"The input training data files (multiple files in glob format). "
"Very often splitting large files to smaller files can prevent tokenizer going out of memory"
)
} , )
SCREAMING_SNAKE_CASE__ :Optional[str] = field(
default=_UpperCamelCase , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
SCREAMING_SNAKE_CASE__ :Optional[str] = field(
default=_UpperCamelCase , metadata={"help": "An optional input train ref data file for whole word mask in Chinese."} , )
SCREAMING_SNAKE_CASE__ :Optional[str] = field(
default=_UpperCamelCase , metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."} , )
SCREAMING_SNAKE_CASE__ :bool = field(
default=_UpperCamelCase , metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."} , )
SCREAMING_SNAKE_CASE__ :bool = field(
default=_UpperCamelCase , metadata={"help": "Train with masked-language modeling loss instead of language modeling."} )
SCREAMING_SNAKE_CASE__ :bool = field(default=_UpperCamelCase , metadata={"help": "Whether ot not to use whole word mask."} )
SCREAMING_SNAKE_CASE__ :float = field(
default=0.15 , metadata={"help": "Ratio of tokens to mask for masked language modeling loss"} )
SCREAMING_SNAKE_CASE__ :float = field(
default=1 / 6 , metadata={
"help": (
"Ratio of length of a span of masked tokens to surrounding context length for permutation language"
" modeling."
)
} , )
SCREAMING_SNAKE_CASE__ :int = field(
default=5 , metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."} )
SCREAMING_SNAKE_CASE__ :int = field(
default=-1 , metadata={
"help": (
"Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens)."
)
} , )
SCREAMING_SNAKE_CASE__ :bool = field(
default=_UpperCamelCase , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ = False ,lowercase_ = None ,) -> List[str]:
"""simple docstring"""
def _dataset(lowercase_ ,lowercase_=None ):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
raise ValueError("You need to set world whole masking and mlm to True for Chinese Whole Word Mask" )
return LineByLineWithRefDataset(
tokenizer=lowercase_ ,file_path=lowercase_ ,block_size=args.block_size ,ref_path=lowercase_ ,)
return LineByLineTextDataset(tokenizer=lowercase_ ,file_path=lowercase_ ,block_size=args.block_size )
else:
return TextDataset(
tokenizer=lowercase_ ,file_path=lowercase_ ,block_size=args.block_size ,overwrite_cache=args.overwrite_cache ,cache_dir=lowercase_ ,)
if evaluate:
return _dataset(args.eval_data_file ,args.eval_ref_file )
elif args.train_data_files:
return ConcatDataset([_dataset(lowercase_ ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file ,args.train_ref_file )
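# In `line_by_line` mode every input line becomes one training example;
# otherwise TextDataset concatenates the corpus and slices it into
# `block_size` chunks (cached unless --overwrite_cache is passed).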
def lowercase__ ( ) -> int:
"""simple docstring"""
_UpperCamelCase : Any = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
_UpperCamelCase : int = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
"Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
"or remove the --do_eval argument." )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" ,datefmt="%m/%d/%Y %H:%M:%S" ,level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN ,)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" ,training_args.local_rank ,training_args.device ,training_args.n_gpu ,bool(training_args.local_rank != -1 ) ,training_args.fpaa ,)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s" ,lowercase_ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
_UpperCamelCase : Optional[int] = AutoConfig.from_pretrained(model_args.config_name ,cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
_UpperCamelCase : List[Any] = AutoConfig.from_pretrained(model_args.model_name_or_path ,cache_dir=model_args.cache_dir )
else:
_UpperCamelCase : Dict = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch." )
if model_args.tokenizer_name:
_UpperCamelCase : int = AutoTokenizer.from_pretrained(model_args.tokenizer_name ,cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
_UpperCamelCase : Optional[Any] = AutoTokenizer.from_pretrained(model_args.model_name_or_path ,cache_dir=model_args.cache_dir )
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"
" script, save it,and load it from here, using --tokenizer_name" )
if model_args.model_name_or_path:
_UpperCamelCase : List[Any] = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path ,from_tf=bool(".ckpt" in model_args.model_name_or_path ) ,config=lowercase_ ,cache_dir=model_args.cache_dir ,)
else:
logger.info("Training new model from scratch" )
_UpperCamelCase : List[Any] = AutoModelWithLMHead.from_config(lowercase_ )
model.resize_token_embeddings(len(lowercase_ ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
"BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"
"--mlm flag (masked language modeling)." )
if data_args.block_size <= 0:
_UpperCamelCase : Dict = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
_UpperCamelCase : Union[str, Any] = min(data_args.block_size ,tokenizer.max_len )
# Get datasets
_UpperCamelCase : int = (
get_dataset(lowercase_ ,tokenizer=lowercase_ ,cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
_UpperCamelCase : int = (
get_dataset(lowercase_ ,tokenizer=lowercase_ ,evaluate=lowercase_ ,cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
_UpperCamelCase : Dict = DataCollatorForPermutationLanguageModeling(
tokenizer=lowercase_ ,plm_probability=data_args.plm_probability ,max_span_length=data_args.max_span_length ,)
else:
if data_args.mlm and data_args.whole_word_mask:
_UpperCamelCase : Optional[Any] = DataCollatorForWholeWordMask(
tokenizer=lowercase_ ,mlm_probability=data_args.mlm_probability )
else:
_UpperCamelCase : Union[str, Any] = DataCollatorForLanguageModeling(
tokenizer=lowercase_ ,mlm=data_args.mlm ,mlm_probability=data_args.mlm_probability )
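    # Collator choice above: XLNet uses the permutation-LM collator; --mlm with
    # whole-word masking uses the whole-word-mask collator; otherwise plain
    # (masked) language-modeling collation is used.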
# Initialize our Trainer
_UpperCamelCase : str = Trainer(
model=lowercase_ ,args=lowercase_ ,data_collator=lowercase_ ,train_dataset=lowercase_ ,eval_dataset=lowercase_ ,prediction_loss_only=lowercase_ ,)
# Training
if training_args.do_train:
_UpperCamelCase : Tuple = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=lowercase_ )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
_UpperCamelCase : int = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
_UpperCamelCase : Union[str, Any] = trainer.evaluate()
_UpperCamelCase : Dict = math.exp(eval_output["eval_loss"] )
_UpperCamelCase : int = {"perplexity": perplexity}
_UpperCamelCase : int = os.path.join(training_args.output_dir ,"eval_results_lm.txt" )
if trainer.is_world_master():
with open(lowercase_ ,"w" ) as writer:
logger.info("***** Eval results *****" )
for key in sorted(result.keys() ):
logger.info(" %s = %s" ,lowercase_ ,str(result[key] ) )
writer.write("%s = %s\n" % (key, str(result[key] )) )
results.update(lowercase_ )
return results
def lowercase__ ( lowercase_ ) -> Optional[int]:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 706
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Union[str, Any] = RoCBertTokenizer
SCREAMING_SNAKE_CASE__ :Dict = None
SCREAMING_SNAKE_CASE__ :List[Any] = False
SCREAMING_SNAKE_CASE__ :Union[str, Any] = True
SCREAMING_SNAKE_CASE__ :Union[str, Any] = filter_non_english
def __SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
super().setUp()
_UpperCamelCase : Any = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
_UpperCamelCase : List[str] = {}
_UpperCamelCase : Tuple = {}
for i, value in enumerate(__a ):
_UpperCamelCase : List[str] = i
_UpperCamelCase : Optional[Any] = i
_UpperCamelCase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_UpperCamelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_shape_file"] )
_UpperCamelCase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_pronunciation_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
with open(self.word_shape_file , "w" , encoding="utf-8" ) as word_shape_writer:
json.dump(__a , __a , ensure_ascii=__a )
with open(self.word_pronunciation_file , "w" , encoding="utf-8" ) as word_pronunciation_writer:
json.dump(__a , __a , ensure_ascii=__a )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
_UpperCamelCase : Tuple = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
_UpperCamelCase : int = tokenizer.tokenize("你好[SEP]你是谁" )
self.assertListEqual(__a , ["你", "好", "[SEP]", "你", "是", "谁"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(__a ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(__a ) , [5, 6, 2, 5, 7, 8] )
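        # The dummy shape / pronunciation maps in setUp mirror the vocab ids,
        # which is why all three id sequences above coincide.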
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
_UpperCamelCase : Dict = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
_UpperCamelCase : List[Any] = RoCBertBasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
_UpperCamelCase : Optional[Any] = RoCBertBasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
_UpperCamelCase : Dict = RoCBertBasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
_UpperCamelCase : List[str] = RoCBertBasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
_UpperCamelCase : Tuple = RoCBertBasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
_UpperCamelCase : Union[str, Any] = RoCBertBasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
_UpperCamelCase : Tuple = RoCBertBasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
_UpperCamelCase : int = RoCBertBasicTokenizer(do_lower_case=__a , never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
_UpperCamelCase : Optional[int] = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
_UpperCamelCase : Any = {}
for i, token in enumerate(__a ):
_UpperCamelCase : str = i
_UpperCamelCase : Optional[int] = RoCBertWordpieceTokenizer(vocab=__a , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
_UpperCamelCase : Optional[Any] = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
if self.test_rust_tokenizer:
_UpperCamelCase : Tuple = self.get_rust_tokenizer()
self.assertListEqual(
            [rust_tokenizer.tokenize(t ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_UpperCamelCase : int = self.rust_tokenizer_class.from_pretrained(__a , **__a )
_UpperCamelCase : Union[str, Any] = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
_UpperCamelCase : Optional[Any] = tokenizer_r.encode_plus(
__a , return_attention_mask=__a , return_token_type_ids=__a , return_offsets_mapping=__a , add_special_tokens=__a , )
_UpperCamelCase : List[Any] = tokenizer_r.do_lower_case if hasattr(__a , "do_lower_case" ) else False
_UpperCamelCase : Dict = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
_UpperCamelCase : Optional[Any] = ["的", "人", "有"]
_UpperCamelCase : int = "".join(__a )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_UpperCamelCase : int = True
_UpperCamelCase : Any = self.tokenizer_class.from_pretrained(__a , **__a )
_UpperCamelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(__a , **__a )
_UpperCamelCase : int = tokenizer_p.encode(__a , add_special_tokens=__a )
_UpperCamelCase : int = tokenizer_r.encode(__a , add_special_tokens=__a )
_UpperCamelCase : List[Any] = tokenizer_r.convert_ids_to_tokens(__a )
_UpperCamelCase : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(__a )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(__a , __a )
self.assertListEqual(__a , __a )
_UpperCamelCase : Any = False
_UpperCamelCase : Dict = self.rust_tokenizer_class.from_pretrained(__a , **__a )
_UpperCamelCase : Any = self.tokenizer_class.from_pretrained(__a , **__a )
_UpperCamelCase : Any = tokenizer_r.encode(__a , add_special_tokens=__a )
_UpperCamelCase : Any = tokenizer_p.encode(__a , add_special_tokens=__a )
_UpperCamelCase : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(__a )
_UpperCamelCase : Dict = tokenizer_p.convert_ids_to_tokens(__a )
# it is expected that only the first Chinese character is not preceded by "##".
_UpperCamelCase : Any = [
F'''##{token}''' if idx != 0 else token for idx, token in enumerate(__a )
]
self.assertListEqual(__a , __a )
self.assertListEqual(__a , __a )
@slow
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
_UpperCamelCase : Dict = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
_UpperCamelCase : Optional[int] = tokenizer.encode("你好" , add_special_tokens=__a )
_UpperCamelCase : Dict = tokenizer.encode("你是谁" , add_special_tokens=__a )
_UpperCamelCase : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(__a )
_UpperCamelCase : Tuple = tokenizer.build_inputs_with_special_tokens(__a , __a )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
_UpperCamelCase : Optional[Any] = self.get_tokenizers(do_lower_case=__a )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_UpperCamelCase : int = "你好,你是谁"
_UpperCamelCase : Any = tokenizer.tokenize(__a )
_UpperCamelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(__a )
_UpperCamelCase : List[str] = tokenizer.convert_tokens_to_shape_ids(__a )
_UpperCamelCase : Any = tokenizer.convert_tokens_to_pronunciation_ids(__a )
_UpperCamelCase : Optional[int] = tokenizer.prepare_for_model(
__a , __a , __a , add_special_tokens=__a )
_UpperCamelCase : Tuple = tokenizer.encode_plus(__a , add_special_tokens=__a )
self.assertEqual(__a , __a )
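# Illustrative sketch, not part of the dataset sample above: the greedy
# longest-match-first WordPiece algorithm that the RoCBertWordpieceTokenizer
# tests exercise, rewritten with de-obfuscated (assumed) names.
def greedy_wordpiece(word: str, vocab: set, unk: str = "[UNK]") -> list:
    tokens = []
    start = 0
    while start < len(word):
        end = len(word)
        while end > start:
            piece = word[start:end] if start == 0 else "##" + word[start:end]
            if piece in vocab:  # longest matching piece wins
                tokens.append(piece)
                break
            end -= 1
        else:  # no piece matched: the whole word becomes the unknown token
            return [unk]
        start = end
    return tokens
assert greedy_wordpiece("unwanted", {"un", "##want", "##ed"}) == ["un", "##want", "##ed"]
assert greedy_wordpiece("unwantedx", {"un", "##want", "##ed"}) == ["[UNK]"]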
| 51
| 0
|
"""simple docstring"""
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def lowercase__ ( lowercase_ ) -> List[str]:
"""simple docstring"""
_UpperCamelCase : List[Any] = args.pruning_method
_UpperCamelCase : int = args.threshold
_UpperCamelCase : Optional[Any] = args.model_name_or_path.rstrip("/" )
_UpperCamelCase : str = args.target_model_path
print(F'''Load fine-pruned model from {model_name_or_path}''' )
_UpperCamelCase : Dict = torch.load(os.path.join(lowercase_ ,"pytorch_model.bin" ) )
_UpperCamelCase : Tuple = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
_UpperCamelCase : List[Any] = tensor
print(F'''Copied layer {name}''' )
elif "classifier" in name or "qa_output" in name:
_UpperCamelCase : str = tensor
print(F'''Copied layer {name}''' )
elif "bias" in name:
_UpperCamelCase : List[str] = tensor
print(F'''Copied layer {name}''' )
else:
if pruning_method == "magnitude":
_UpperCamelCase : List[Any] = MagnitudeBinarizer.apply(inputs=lowercase_ ,threshold=lowercase_ )
_UpperCamelCase : List[Any] = tensor * mask
print(F'''Pruned layer {name}''' )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
_UpperCamelCase : Tuple = name[:-6]
_UpperCamelCase : Union[str, Any] = model[F'''{prefix_}mask_scores''']
_UpperCamelCase : List[str] = TopKBinarizer.apply(lowercase_ ,lowercase_ )
_UpperCamelCase : Optional[Any] = tensor * mask
print(F'''Pruned layer {name}''' )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
_UpperCamelCase : List[str] = name[:-6]
_UpperCamelCase : Union[str, Any] = model[F'''{prefix_}mask_scores''']
_UpperCamelCase : Tuple = ThresholdBinarizer.apply(lowercase_ ,lowercase_ ,lowercase_ )
_UpperCamelCase : Union[str, Any] = tensor * mask
print(F'''Pruned layer {name}''' )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
_UpperCamelCase : str = name[:-6]
_UpperCamelCase : List[Any] = model[F'''{prefix_}mask_scores''']
                _UpperCamelCase, _UpperCamelCase : Optional[int] = -0.1, 1.1
_UpperCamelCase : Dict = torch.sigmoid(lowercase_ )
_UpperCamelCase : str = s * (r - l) + l
_UpperCamelCase : Any = s_bar.clamp(min=0.0 ,max=1.0 )
_UpperCamelCase : Optional[int] = tensor * mask
print(F'''Pruned layer {name}''' )
else:
raise ValueError("Unknown pruning method" )
if target_model_path is None:
_UpperCamelCase : int = os.path.join(
os.path.dirname(lowercase_ ) ,F'''bertarized_{os.path.basename(lowercase_ )}''' )
if not os.path.isdir(lowercase_ ):
shutil.copytree(lowercase_ ,lowercase_ )
print(F'''\nCreated folder {target_model_path}''' )
torch.save(lowercase_ ,os.path.join(lowercase_ ,"pytorch_model.bin" ) )
print("\nPruned model saved! See you later!" )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument(
"--pruning_method",
choices=["l0", "magnitude", "topK", "sigmoied_threshold"],
type=str,
required=True,
help=(
"Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"
" sigmoied_threshold = Soft movement pruning)"
),
)
parser.add_argument(
"--threshold",
type=float,
required=False,
help=(
"For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."
"For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."
"Not needed for `l0`"
),
)
parser.add_argument(
"--model_name_or_path",
type=str,
required=True,
help="Folder containing the model that was previously fine-pruned",
)
parser.add_argument(
"--target_model_path",
default=None,
type=str,
required=False,
help="Folder containing the model that was previously fine-pruned",
)
lowerCamelCase__ = parser.parse_args()
main(args)
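# Illustrative sketch, not part of the script above: the idea behind magnitude
# pruning -- keep the top fraction of weights by absolute value and zero the
# rest. `magnitude_mask` is a hypothetical stand-in for the mask produced by
# MagnitudeBinarizer.apply; the real binarizer lives in the emmental package.
import torch

def magnitude_mask(tensor: torch.Tensor, keep_fraction: float) -> torch.Tensor:
    k = max(1, int(tensor.numel() * keep_fraction))  # number of weights to keep
    threshold = tensor.abs().flatten().kthvalue(tensor.numel() - k + 1).values
    return (tensor.abs() >= threshold).to(tensor.dtype)

weights = torch.tensor([0.1, -0.9, 0.4, -0.2])
pruned = weights * magnitude_mask(weights, keep_fraction=0.5)
assert torch.equal(pruned, torch.tensor([0.0, -0.9, 0.4, 0.0]))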
| 707
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Tuple = "yolos"
def __init__( self : Dict , __a : Optional[Any]=768 , __a : List[Any]=12 , __a : Any=12 , __a : List[Any]=3072 , __a : Optional[int]="gelu" , __a : Dict=0.0 , __a : Optional[Any]=0.0 , __a : Any=0.02 , __a : Optional[int]=1e-1_2 , __a : List[Any]=[512, 864] , __a : List[str]=16 , __a : str=3 , __a : Optional[Any]=True , __a : Optional[Any]=100 , __a : List[str]=True , __a : Any=False , __a : List[str]=1 , __a : str=5 , __a : Optional[Any]=2 , __a : Tuple=5 , __a : Any=2 , __a : Union[str, Any]=0.1 , **__a : List[str] , ) -> List[str]:
super().__init__(**__a )
_UpperCamelCase : Dict = hidden_size
_UpperCamelCase : Any = num_hidden_layers
_UpperCamelCase : str = num_attention_heads
_UpperCamelCase : Dict = intermediate_size
_UpperCamelCase : List[str] = hidden_act
_UpperCamelCase : List[str] = hidden_dropout_prob
_UpperCamelCase : str = attention_probs_dropout_prob
_UpperCamelCase : Tuple = initializer_range
_UpperCamelCase : List[str] = layer_norm_eps
_UpperCamelCase : Tuple = image_size
_UpperCamelCase : Tuple = patch_size
_UpperCamelCase : Dict = num_channels
_UpperCamelCase : Any = qkv_bias
_UpperCamelCase : str = num_detection_tokens
_UpperCamelCase : str = use_mid_position_embeddings
_UpperCamelCase : List[str] = auxiliary_loss
# Hungarian matcher
_UpperCamelCase : List[Any] = class_cost
_UpperCamelCase : int = bbox_cost
_UpperCamelCase : Optional[int] = giou_cost
# Loss coefficients
_UpperCamelCase : List[Any] = bbox_loss_coefficient
_UpperCamelCase : str = giou_loss_coefficient
_UpperCamelCase : Dict = eos_coefficient
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :List[str] = version.parse("1.11" )
@property
def __SCREAMING_SNAKE_CASE ( self : str ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> float:
return 1e-4
@property
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
return 12
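# Illustrative sketch, not part of the config above: with the default
# image_size [512, 864], patch_size 16 and 100 detection tokens, the sequence
# seen by the YOLOS encoder is the patch grid plus a CLS token plus the
# detection tokens (assuming standard non-overlapping ViT patchification).
image_size, patch_size, num_detection_tokens = (512, 864), 16, 100
num_patches = (image_size[0] // patch_size) * (image_size[1] // patch_size)
sequence_length = 1 + num_patches + num_detection_tokens  # CLS + patches + detection
assert (num_patches, sequence_length) == (1728, 1829)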
| 51
| 0
|
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :int
SCREAMING_SNAKE_CASE__ :int
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : List[Any] , __a : int ) -> Optional[Any]:
_UpperCamelCase : list[list[Edge]] = [[] for _ in range(__a )]
_UpperCamelCase : Tuple = size
def __getitem__( self : int , __a : int ) -> Iterator[Edge]:
return iter(self._graph[vertex] )
@property
def __SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
return self._size
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : int , __a : int , __a : int ) -> Dict:
if weight not in (0, 1):
raise ValueError("Edge weight must be either 0 or 1." )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError("Vertex indexes must be in [0; size)." )
self._graph[from_vertex].append(Edge(__a , __a ) )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : int , __a : int ) -> int | None:
_UpperCamelCase : Optional[Any] = deque([start_vertex] )
_UpperCamelCase : list[int | None] = [None] * self.size
_UpperCamelCase : int = 0
while queue:
_UpperCamelCase : List[Any] = queue.popleft()
_UpperCamelCase : List[str] = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
_UpperCamelCase : List[Any] = current_distance + edge.weight
_UpperCamelCase : Dict = distances[edge.destination_vertex]
if (
isinstance(__a , __a )
and new_distance >= dest_vertex_distance
):
continue
_UpperCamelCase : List[str] = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError("No path from start_vertex to finish_vertex." )
return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
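# Illustrative sketch, not part of the sample above: the same 0-1 BFS on a plain
# adjacency dict with de-obfuscated (assumed) names. Weight-0 edges go to the
# front of the deque and weight-1 edges to the back, so vertices are popped in
# non-decreasing distance order without a priority queue.
from collections import deque

def zero_one_bfs(adjacency: dict, start: int, finish: int) -> int:
    distances = {start: 0}
    queue = deque([start])
    while queue:
        vertex = queue.popleft()
        for neighbor, weight in adjacency.get(vertex, []):
            candidate = distances[vertex] + weight
            if neighbor not in distances or candidate < distances[neighbor]:
                distances[neighbor] = candidate
                if weight == 0:
                    queue.appendleft(neighbor)
                else:
                    queue.append(neighbor)
    return distances[finish]

assert zero_one_bfs({0: [(1, 0), (2, 1)], 1: [(2, 1)]}, start=0, finish=2) == 1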
| 708
|
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
lowerCamelCase__ = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
lowerCamelCase__ = [ord(letter) for letter in string.ascii_lowercase]
lowerCamelCase__ = {ord(char) for char in VALID_CHARS}
lowerCamelCase__ = ["the", "be", "to", "of", "and", "in", "that", "have"]
def lowercase__ ( lowercase_ ,lowercase_ ) -> str | None:
"""simple docstring"""
_UpperCamelCase : str = ""
_UpperCamelCase : int
_UpperCamelCase : int
_UpperCamelCase : int
for keychar, cipherchar in zip(cycle(lowercase_ ) ,lowercase_ ):
_UpperCamelCase : Dict = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(lowercase_ )
return decoded
def lowercase__ ( lowercase_ ) -> list[str]:
"""simple docstring"""
_UpperCamelCase : list[str] = []
for key in product(lowercase_ ,repeat=3 ):
_UpperCamelCase : int = try_key(lowercase_ ,lowercase_ )
if encoded is not None:
possibles.append(lowercase_ )
return possibles
def lowercase__ ( lowercase_ ,lowercase_ ) -> list[str]:
"""simple docstring"""
return [possible for possible in possibles if common_word in possible.lower()]
def lowercase__ ( lowercase_ = "p059_cipher.txt" ) -> int:
"""simple docstring"""
_UpperCamelCase : list[int]
_UpperCamelCase : list[str]
_UpperCamelCase : str
_UpperCamelCase : str
_UpperCamelCase : str = Path(lowercase_ ).parent.joinpath(lowercase_ ).read_text(encoding="utf-8" )
_UpperCamelCase : Optional[Any] = [int(lowercase_ ) for number in data.strip().split("," )]
_UpperCamelCase : List[str] = filter_valid_chars(lowercase_ )
for common_word in COMMON_WORDS:
_UpperCamelCase : Union[str, Any] = filter_common_word(lowercase_ ,lowercase_ )
if len(lowercase_ ) == 1:
break
_UpperCamelCase : Union[str, Any] = possibles[0]
    return sum(ord(char ) for char in decoded_text )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 51
| 0
|
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :int
SCREAMING_SNAKE_CASE__ :Node | None = None
SCREAMING_SNAKE_CASE__ :Node | None = None
def lowercase__ ( ) -> Node | None:
"""simple docstring"""
_UpperCamelCase : Tuple = Node(1 )
_UpperCamelCase : int = Node(2 )
_UpperCamelCase : Any = Node(3 )
_UpperCamelCase : List[Any] = Node(4 )
_UpperCamelCase : Tuple = Node(5 )
return tree
def lowercase__ ( lowercase_ ) -> list[int]:
"""simple docstring"""
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def lowercase__ ( lowercase_ ) -> list[int]:
"""simple docstring"""
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def lowercase__ ( lowercase_ ) -> list[int]:
"""simple docstring"""
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def lowercase__ ( lowercase_ ) -> int:
"""simple docstring"""
return (max(height(root.left ) ,height(root.right ) ) + 1) if root else 0
def lowercase__ ( lowercase_ ) -> Sequence[Node | None]:
"""simple docstring"""
_UpperCamelCase : list[Any] = []
if root is None:
return output
_UpperCamelCase : Any = deque([root] )
while process_queue:
_UpperCamelCase : List[Any] = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def lowercase__ ( lowercase_ ,lowercase_ ) -> Sequence[Node | None]:
"""simple docstring"""
_UpperCamelCase : list[Any] = []
def populate_output(lowercase_ ,lowercase_ ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left ,level - 1 )
populate_output(root.right ,level - 1 )
populate_output(lowercase_ ,lowercase_ )
return output
def lowercase__ ( lowercase_ ,lowercase_ ) -> Sequence[Node | None]:
"""simple docstring"""
_UpperCamelCase : list[Any] = []
def populate_output(lowercase_ ,lowercase_ ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right ,level - 1 )
populate_output(root.left ,level - 1 )
populate_output(lowercase_ ,lowercase_ )
return output
def lowercase__ ( lowercase_ ) -> Sequence[Node | None] | list[Any]:
"""simple docstring"""
if root is None:
return []
_UpperCamelCase : list[Sequence[Node | None]] = []
_UpperCamelCase : Union[str, Any] = 0
_UpperCamelCase : Any = height(lowercase_ )
for h in range(1 ,height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(lowercase_ ,lowercase_ ) )
_UpperCamelCase : Dict = 1
else:
output.append(get_nodes_from_right_to_left(lowercase_ ,lowercase_ ) )
_UpperCamelCase : List[str] = 0
return output
def lowercase__ ( ) -> None: # Main function for testing.
"""simple docstring"""
_UpperCamelCase : Union[str, Any] = make_tree()
print(F'''In-order Traversal: {inorder(lowercase_ )}''' )
print(F'''Pre-order Traversal: {preorder(lowercase_ )}''' )
print(F'''Post-order Traversal: {postorder(lowercase_ )}''' ,"\n" )
print(F'''Height of Tree: {height(lowercase_ )}''' ,"\n" )
print("Complete Level Order Traversal: " )
print(level_order(lowercase_ ) ,"\n" )
print("Level-wise order Traversal: " )
for level in range(1 ,height(lowercase_ ) + 1 ):
print(F'''Level {level}:''' ,get_nodes_from_left_to_right(lowercase_ ,level=lowercase_ ) )
print("\nZigZag order Traversal: " )
print(zigzag(lowercase_ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
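# Illustrative sketch, not part of the sample above: zigzag order can also be
# obtained by reversing every other level of a plain level-order walk, instead
# of issuing one directional per-level traversal at a time.
def zigzag_from_levels(levels: list) -> list:
    return [level if depth % 2 == 0 else level[::-1] for depth, level in enumerate(levels)]

assert zigzag_from_levels([[1], [2, 3], [4, 5]]) == [[1], [3, 2], [4, 5]]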
| 709
|
"""simple docstring"""
def lowercase__ ( lowercase_ ,lowercase_ ) -> None:
"""simple docstring"""
_UpperCamelCase : List[Any] = len(lowercase_ )
print("The following activities are selected:" )
# The first activity is always selected
_UpperCamelCase : List[Any] = 0
print(lowercase_ ,end="," )
# Consider rest of the activities
for j in range(lowercase_ ):
# If this activity has start time greater than
# or equal to the finish time of previously
# selected activity, then select it
if start[j] >= finish[i]:
print(lowercase_ ,end="," )
_UpperCamelCase : Optional[Any] = j
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase__ = [1, 3, 0, 5, 8, 5]
lowerCamelCase__ = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
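# Illustrative sketch, not part of the sample above: the greedy step assumes the
# activities are already sorted by finish time; sorting first (as below) makes
# the selection correct for arbitrary input order.
def select_activities(start: list, finish: list) -> list:
    order = sorted(range(len(start)), key=lambda i: finish[i])
    selected = [order[0]]
    for j in order[1:]:
        if start[j] >= finish[selected[-1]]:
            selected.append(j)
    return selected

assert select_activities([1, 3, 0, 5, 8, 5], [2, 4, 6, 7, 9, 9]) == [0, 1, 3, 4]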
| 51
| 0
|
"""simple docstring"""
def lowercase__ ( lowercase_ ) -> list[list[int]]:
"""simple docstring"""
_UpperCamelCase : List[str] = []
if len(lowercase_ ) == 1:
return [nums.copy()]
for _ in range(len(lowercase_ ) ):
_UpperCamelCase : Dict = nums.pop(0 )
_UpperCamelCase : Any = permute(lowercase_ )
for perm in permutations:
perm.append(lowercase_ )
result.extend(lowercase_ )
nums.append(lowercase_ )
return result
def lowercase__ ( lowercase_ ) -> int:
"""simple docstring"""
def backtrack(lowercase_ ):
if start == len(lowercase_ ) - 1:
output.append(nums[:] )
else:
for i in range(lowercase_ ,len(lowercase_ ) ):
                _UpperCamelCase, _UpperCamelCase : Tuple = nums[i], nums[start]
                backtrack(start + 1 )
                _UpperCamelCase, _UpperCamelCase : Any = nums[i], nums[start] # backtrack
_UpperCamelCase : str = []
backtrack(0 )
return output
if __name__ == "__main__":
import doctest
# use res to print the data in permute2 function
lowerCamelCase__ = permutea([1, 2, 3])
print(res)
doctest.testmod()
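# Illustrative sketch, not part of the sample above: a cheap sanity check --
# both hand-rolled implementations should agree with itertools.permutations up
# to ordering and produce n! results for n distinct elements.
from itertools import permutations
from math import factorial

nums = [1, 2, 3]
reference = sorted(list(p) for p in permutations(nums))
assert len(reference) == factorial(len(nums))
assert [1, 2, 3] in reference and [3, 2, 1] in reference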
| 710
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :torch.FloatTensor
class __SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , __a : Dict=3 , __a : Any=3 , __a : Union[str, Any]=("DownEncoderBlock2D",) , __a : Optional[int]=(64,) , __a : int=2 , __a : Tuple=32 , __a : int="silu" , __a : str=True , ) -> Dict:
super().__init__()
_UpperCamelCase : List[str] = layers_per_block
_UpperCamelCase : Dict = torch.nn.Convad(
__a , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
_UpperCamelCase : int = None
_UpperCamelCase : Any = nn.ModuleList([] )
# down
_UpperCamelCase : List[str] = block_out_channels[0]
for i, down_block_type in enumerate(__a ):
_UpperCamelCase : Tuple = output_channel
_UpperCamelCase : int = block_out_channels[i]
_UpperCamelCase : int = i == len(__a ) - 1
_UpperCamelCase : Dict = get_down_block(
__a , num_layers=self.layers_per_block , in_channels=__a , out_channels=__a , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=__a , resnet_groups=__a , attention_head_dim=__a , temb_channels=__a , )
self.down_blocks.append(__a )
# mid
_UpperCamelCase : Union[str, Any] = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__a , output_scale_factor=1 , resnet_time_scale_shift="default" , attention_head_dim=block_out_channels[-1] , resnet_groups=__a , temb_channels=__a , )
# out
_UpperCamelCase : Any = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=__a , eps=1e-6 )
_UpperCamelCase : Any = nn.SiLU()
_UpperCamelCase : Union[str, Any] = 2 * out_channels if double_z else out_channels
_UpperCamelCase : Tuple = nn.Convad(block_out_channels[-1] , __a , 3 , padding=1 )
_UpperCamelCase : Optional[int] = False
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : Dict ) -> List[str]:
_UpperCamelCase : int = x
_UpperCamelCase : Optional[int] = self.conv_in(__a )
if self.training and self.gradient_checkpointing:
def create_custom_forward(__a : Tuple ):
def custom_forward(*__a : Any ):
return module(*__a )
return custom_forward
# down
if is_torch_version(">=" , "1.11.0" ):
for down_block in self.down_blocks:
_UpperCamelCase : Optional[int] = torch.utils.checkpoint.checkpoint(
create_custom_forward(__a ) , __a , use_reentrant=__a )
# middle
_UpperCamelCase : Tuple = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __a , use_reentrant=__a )
else:
for down_block in self.down_blocks:
_UpperCamelCase : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(__a ) , __a )
# middle
_UpperCamelCase : int = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , __a )
else:
# down
for down_block in self.down_blocks:
_UpperCamelCase : int = down_block(__a )
# middle
_UpperCamelCase : int = self.mid_block(__a )
# post-process
_UpperCamelCase : Any = self.conv_norm_out(__a )
_UpperCamelCase : Any = self.conv_act(__a )
_UpperCamelCase : Optional[Any] = self.conv_out(__a )
return sample
class __SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , __a : int=3 , __a : Any=3 , __a : str=("UpDecoderBlock2D",) , __a : Optional[int]=(64,) , __a : int=2 , __a : Optional[int]=32 , __a : Tuple="silu" , __a : Union[str, Any]="group" , ) -> str:
super().__init__()
_UpperCamelCase : List[Any] = layers_per_block
_UpperCamelCase : Tuple = nn.Convad(
__a , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
_UpperCamelCase : List[str] = None
_UpperCamelCase : Dict = nn.ModuleList([] )
_UpperCamelCase : List[Any] = in_channels if norm_type == "spatial" else None
# mid
_UpperCamelCase : Optional[Any] = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__a , output_scale_factor=1 , resnet_time_scale_shift="default" if norm_type == "group" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=__a , temb_channels=__a , )
# up
_UpperCamelCase : List[str] = list(reversed(__a ) )
_UpperCamelCase : int = reversed_block_out_channels[0]
for i, up_block_type in enumerate(__a ):
_UpperCamelCase : int = output_channel
_UpperCamelCase : Union[str, Any] = reversed_block_out_channels[i]
_UpperCamelCase : Optional[Any] = i == len(__a ) - 1
_UpperCamelCase : Union[str, Any] = get_up_block(
__a , num_layers=self.layers_per_block + 1 , in_channels=__a , out_channels=__a , prev_output_channel=__a , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=__a , resnet_groups=__a , attention_head_dim=__a , temb_channels=__a , resnet_time_scale_shift=__a , )
self.up_blocks.append(__a )
_UpperCamelCase : Optional[Any] = output_channel
# out
if norm_type == "spatial":
_UpperCamelCase : Optional[int] = SpatialNorm(block_out_channels[0] , __a )
else:
_UpperCamelCase : Optional[int] = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=__a , eps=1e-6 )
_UpperCamelCase : str = nn.SiLU()
_UpperCamelCase : str = nn.Convad(block_out_channels[0] , __a , 3 , padding=1 )
_UpperCamelCase : Dict = False
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : List[Any] , __a : Union[str, Any]=None ) -> Tuple:
_UpperCamelCase : List[str] = z
_UpperCamelCase : Dict = self.conv_in(__a )
_UpperCamelCase : Any = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(__a : Any ):
def custom_forward(*__a : Tuple ):
return module(*__a )
return custom_forward
if is_torch_version(">=" , "1.11.0" ):
# middle
_UpperCamelCase : str = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __a , __a , use_reentrant=__a )
_UpperCamelCase : Optional[int] = sample.to(__a )
# up
for up_block in self.up_blocks:
_UpperCamelCase : List[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(__a ) , __a , __a , use_reentrant=__a )
else:
# middle
_UpperCamelCase : Optional[int] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __a , __a )
_UpperCamelCase : Union[str, Any] = sample.to(__a )
# up
for up_block in self.up_blocks:
_UpperCamelCase : str = torch.utils.checkpoint.checkpoint(create_custom_forward(__a ) , __a , __a )
else:
# middle
_UpperCamelCase : str = self.mid_block(__a , __a )
_UpperCamelCase : int = sample.to(__a )
# up
for up_block in self.up_blocks:
_UpperCamelCase : Any = up_block(__a , __a )
# post-process
if latent_embeds is None:
_UpperCamelCase : List[str] = self.conv_norm_out(__a )
else:
_UpperCamelCase : Optional[int] = self.conv_norm_out(__a , __a )
_UpperCamelCase : Tuple = self.conv_act(__a )
_UpperCamelCase : List[Any] = self.conv_out(__a )
return sample
class __SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , __a : Tuple , __a : List[str] , __a : List[str] , __a : str=None , __a : Optional[int]="random" , __a : Any=False , __a : Optional[Any]=True ) -> List[Any]:
super().__init__()
_UpperCamelCase : Tuple = n_e
_UpperCamelCase : Tuple = vq_embed_dim
_UpperCamelCase : Union[str, Any] = beta
_UpperCamelCase : str = legacy
_UpperCamelCase : Dict = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
_UpperCamelCase : Any = remap
if self.remap is not None:
self.register_buffer("used" , torch.tensor(np.load(self.remap ) ) )
_UpperCamelCase : Dict = self.used.shape[0]
_UpperCamelCase : Optional[int] = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
_UpperCamelCase : Optional[int] = self.re_embed
_UpperCamelCase : Any = self.re_embed + 1
print(
F'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
F'''Using {self.unknown_index} for unknown indices.''' )
else:
_UpperCamelCase : Union[str, Any] = n_e
_UpperCamelCase : List[str] = sane_index_shape
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : Optional[Any] ) -> Optional[int]:
_UpperCamelCase : str = inds.shape
        assert len(ishape ) > 1
_UpperCamelCase : Union[str, Any] = inds.reshape(ishape[0] , -1 )
_UpperCamelCase : Optional[Any] = self.used.to(__a )
_UpperCamelCase : List[str] = (inds[:, :, None] == used[None, None, ...]).long()
_UpperCamelCase : Optional[Any] = match.argmax(-1 )
_UpperCamelCase : Any = match.sum(2 ) < 1
if self.unknown_index == "random":
_UpperCamelCase : Optional[int] = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
_UpperCamelCase : Dict = self.unknown_index
return new.reshape(__a )
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : Optional[int] ) -> Optional[int]:
_UpperCamelCase : int = inds.shape
        assert len(ishape ) > 1
_UpperCamelCase : List[Any] = inds.reshape(ishape[0] , -1 )
_UpperCamelCase : Optional[int] = self.used.to(__a )
if self.re_embed > self.used.shape[0]: # extra token
_UpperCamelCase : int = 0 # simply set to zero
_UpperCamelCase : Union[str, Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , __a )
return back.reshape(__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : str ) -> Optional[int]:
# reshape z -> (batch, height, width, channel) and flatten
_UpperCamelCase : List[str] = z.permute(0 , 2 , 3 , 1 ).contiguous()
_UpperCamelCase : int = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
_UpperCamelCase : Optional[int] = torch.argmin(torch.cdist(__a , self.embedding.weight ) , dim=1 )
_UpperCamelCase : int = self.embedding(__a ).view(z.shape )
_UpperCamelCase : str = None
_UpperCamelCase : Any = None
# compute loss for embedding
if not self.legacy:
_UpperCamelCase : List[str] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
_UpperCamelCase : str = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
_UpperCamelCase : List[str] = z + (z_q - z).detach()
# reshape back to match original input shape
_UpperCamelCase : Optional[Any] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
_UpperCamelCase : Tuple = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
_UpperCamelCase : Dict = self.remap_to_used(__a )
_UpperCamelCase : List[str] = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
_UpperCamelCase : str = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : List[str] , __a : str ) -> Any:
# shape specifying (batch, height, width, channel)
if self.remap is not None:
_UpperCamelCase : str = indices.reshape(shape[0] , -1 ) # add batch axis
_UpperCamelCase : str = self.unmap_to_all(__a )
_UpperCamelCase : int = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
_UpperCamelCase : Optional[int] = self.embedding(__a )
if shape is not None:
_UpperCamelCase : Tuple = z_q.view(__a )
# reshape back to match original input shape
_UpperCamelCase : Tuple = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
def __init__( self : Optional[int] , __a : List[str] , __a : Optional[Any]=False ) -> int:
_UpperCamelCase : Dict = parameters
_UpperCamelCase, _UpperCamelCase : str = torch.chunk(__a , 2 , dim=1 )
_UpperCamelCase : Tuple = torch.clamp(self.logvar , -30.0 , 20.0 )
_UpperCamelCase : Union[str, Any] = deterministic
_UpperCamelCase : Dict = torch.exp(0.5 * self.logvar )
_UpperCamelCase : Any = torch.exp(self.logvar )
if self.deterministic:
_UpperCamelCase : List[Any] = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : Optional[torch.Generator] = None ) -> torch.FloatTensor:
# make sure sample is on the same device as the parameters and has same dtype
_UpperCamelCase : List[Any] = randn_tensor(
self.mean.shape , generator=__a , device=self.parameters.device , dtype=self.parameters.dtype )
_UpperCamelCase : List[Any] = self.mean + self.std * sample
return x
def __SCREAMING_SNAKE_CASE ( self : Any , __a : List[str]=None ) -> List[Any]:
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def __SCREAMING_SNAKE_CASE ( self : str , __a : Tuple , __a : List[str]=[1, 2, 3] ) -> int:
if self.deterministic:
return torch.Tensor([0.0] )
_UpperCamelCase : List[str] = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
return self.mean
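# Illustrative sketch, not part of the module above: the straight-through
# estimator used in the vector quantizer -- the forward pass returns the nearest
# codebook entry, while the backward pass treats quantization as the identity so
# gradients reach the encoder output z.
import torch

z = torch.randn(4, 8, requires_grad=True)
codebook = torch.randn(16, 8)
indices = torch.argmin(torch.cdist(z, codebook), dim=1)  # nearest code per row
z_q = codebook[indices]
z_q = z + (z_q - z).detach()  # forward: z_q; backward: identity w.r.t. z
z_q.sum().backward()
assert torch.equal(z.grad, torch.ones_like(z))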
| 51
| 0
|
"""simple docstring"""
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 711
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=_UpperCamelCase )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :str = field(default="summarization" , metadata={"include_in_asdict_even_if_is_default": True} )
SCREAMING_SNAKE_CASE__ :ClassVar[Features] = Features({"text": Value("string" )} )
SCREAMING_SNAKE_CASE__ :ClassVar[Features] = Features({"summary": Value("string" )} )
SCREAMING_SNAKE_CASE__ :str = "text"
SCREAMING_SNAKE_CASE__ :str = "summary"
@property
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict[str, str]:
return {self.text_column: "text", self.summary_column: "summary"}
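# Illustrative sketch, not part of the sample above: the column_mapping property
# is what lets a dataset with arbitrary column names (here the hypothetical
# "article"/"highlights" pair) be renamed to the canonical "text"/"summary".
column_mapping = {"article": "text", "highlights": "summary"}
example = {"article": "Long input document.", "highlights": "Short summary."}
renamed = {column_mapping[name]: value for name, value in example.items()}
assert set(renamed) == {"text", "summary"}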
| 51
| 0
|
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@slow
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict:
_UpperCamelCase : int = XLMRobertaModel.from_pretrained("xlm-roberta-base" )
_UpperCamelCase : List[str] = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]] )
# The dog is cute and lives in the garden house
_UpperCamelCase : List[str] = torch.Size((1, 12, 768) ) # batch_size, sequence_length, embedding_vector_dim
_UpperCamelCase : List[str] = torch.tensor(
[[-0.01_01, 0.12_18, -0.08_03, 0.08_01, 0.13_27, 0.07_76, -0.12_15, 0.23_83, 0.33_38, 0.31_06, 0.03_00, 0.02_52]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
_UpperCamelCase : List[str] = model(__a )["last_hidden_state"].detach()
self.assertEqual(output.shape , __a )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , __a , atol=1e-3 ) )
@slow
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]:
_UpperCamelCase : List[str] = XLMRobertaModel.from_pretrained("xlm-roberta-large" )
_UpperCamelCase : Optional[Any] = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]] )
# The dog is cute and lives in the garden house
_UpperCamelCase : Optional[Any] = torch.Size((1, 12, 1024) ) # batch_size, sequence_length, embedding_vector_dim
_UpperCamelCase : str = torch.tensor(
[[-0.06_99, -0.03_18, 0.07_05, -0.12_41, 0.09_99, -0.05_20, 0.10_04, -0.18_38, -0.47_04, 0.14_37, 0.08_21, 0.01_26]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
_UpperCamelCase : List[Any] = model(__a )["last_hidden_state"].detach()
self.assertEqual(output.shape , __a )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , __a , atol=1e-3 ) )
| 712
|
"""simple docstring"""
def lowercase__ ( lowercase_ ) -> set:
"""simple docstring"""
_UpperCamelCase : Union[str, Any] = set()
# edges = list of graph's edges
_UpperCamelCase : Union[str, Any] = get_edges(lowercase_ )
# While there are still elements in edges list, take an arbitrary edge
# (from_node, to_node) and add his extremity to chosen_vertices and then
# remove all arcs adjacent to the from_node and to_node
while edges:
_UpperCamelCase, _UpperCamelCase : str = edges.pop()
chosen_vertices.add(lowercase_ )
chosen_vertices.add(lowercase_ )
for edge in edges.copy():
if from_node in edge or to_node in edge:
edges.discard(lowercase_ )
return chosen_vertices
def lowercase__ ( lowercase_ ) -> set:
"""simple docstring"""
_UpperCamelCase : List[str] = set()
for from_node, to_nodes in graph.items():
for to_node in to_nodes:
edges.add((from_node, to_node) )
return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 51
| 0
|
"""simple docstring"""
def lowercase__ ( lowercase_ = 1_000_000 ) -> int:
"""simple docstring"""
_UpperCamelCase : int = set(range(3 ,lowercase_ ,2 ) )
primes.add(2 )
for p in range(3 ,lowercase_ ,2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p ,lowercase_ ,lowercase_ ) ) )
    _UpperCamelCase : Union[str, Any] = [float(n ) for n in range(limit + 1 )]
for p in primes:
for n in range(lowercase_ ,limit + 1 ,lowercase_ ):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 713
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase__ = {
"configuration_owlvit": [
"OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"OwlViTConfig",
"OwlViTOnnxConfig",
"OwlViTTextConfig",
"OwlViTVisionConfig",
],
"processing_owlvit": ["OwlViTProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["OwlViTFeatureExtractor"]
lowerCamelCase__ = ["OwlViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"OwlViTModel",
"OwlViTPreTrainedModel",
"OwlViTTextModel",
"OwlViTVisionModel",
"OwlViTForObjectDetection",
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
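# Illustrative sketch, not part of the sample above: the _LazyModule pattern
# defers heavy imports until an attribute is first touched. A minimal
# stand-alone version uses module-level __getattr__ (PEP 562); this belongs in
# a package __init__.py rather than a script, and the mapping is hypothetical.
import importlib

_EXPORT_TO_MODULE = {"OwlViTConfig": ".configuration_owlvit"}  # attribute -> submodule

def __getattr__(name: str):
    if name in _EXPORT_TO_MODULE:
        module = importlib.import_module(_EXPORT_TO_MODULE[name], package=__name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")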
| 51
| 0
|
"""simple docstring"""
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
lowerCamelCase__ = "\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n"
lowerCamelCase__ = "\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n"
lowerCamelCase__ = "\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for 'record': list of question-answer dictionaries with the following keys:\n - 'idx': index of the question as specified by the dataset\n - 'prediction_text': the predicted answer text\n - for 'multirc': list of question-answer dictionaries with the following keys:\n - 'idx': index of the question-answer pair as specified by the dataset\n - 'prediction': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for 'record': list of question-answers dictionaries with the following keys:\n - 'idx': index of the question as specified by the dataset\n - 'answers': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for 'record':\n - 'exact_match': Exact match between answer and gold answer\n - 'f1': F1 score\n - for 'multirc':\n - 'exact_match': Exact match between answer and gold answer\n - 'f1_m': Per-question macro-F1 score\n - 'f1_a': Average F1 score over all answers\n - for 'axb':\n 'matthews_correlation': Matthew Correlation\n - for 'cb':\n - 'accuracy': Accuracy\n - 'f1': F1 score\n - for all others:\n - 'accuracy': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'cb')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'record')\n >>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]\n >>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 1.0, 'f1': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')\n >>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'axb')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n"
def lowercase__ ( lowercase_ ,lowercase_ ) -> Optional[Any]:
"""simple docstring"""
return float((preds == labels).mean() )
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_="binary" ) -> str:
"""simple docstring"""
_UpperCamelCase : Tuple = simple_accuracy(lowercase_ ,lowercase_ )
_UpperCamelCase : Union[str, Any] = float(fa_score(y_true=lowercase_ ,y_pred=lowercase_ ,average=lowercase_ ) )
return {
"accuracy": acc,
"f1": fa,
}
def lowercase__ ( lowercase_ ,lowercase_ ) -> Tuple:
"""simple docstring"""
_UpperCamelCase : Optional[Any] = {}
for id_pred, label in zip(lowercase_ ,lowercase_ ):
_UpperCamelCase : int = F'''{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}'''
_UpperCamelCase : List[Any] = id_pred["prediction"]
if question_id in question_map:
question_map[question_id].append((pred, label) )
else:
_UpperCamelCase : int = [(pred, label)]
    _UpperCamelCase, _UpperCamelCase : Optional[Any] = [], []
for question, preds_labels in question_map.items():
        _UpperCamelCase, _UpperCamelCase : str = zip(*lowercase_ )
_UpperCamelCase : Optional[Any] = fa_score(y_true=lowercase_ ,y_pred=lowercase_ ,average="macro" )
fas.append(lowercase_ )
_UpperCamelCase : List[Any] = int(sum(pred == label for pred, label in preds_labels ) == len(lowercase_ ) )
ems.append(lowercase_ )
_UpperCamelCase : Optional[Any] = float(sum(lowercase_ ) / len(lowercase_ ) )
_UpperCamelCase : int = sum(lowercase_ ) / len(lowercase_ )
_UpperCamelCase : Union[str, Any] = float(fa_score(y_true=lowercase_ ,y_pred=[id_pred["prediction"] for id_pred in ids_preds] ) )
return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __SCREAMING_SNAKE_CASE ( datasets.Metric ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"You should supply a configuration name selected in "
"[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None , )
def __SCREAMING_SNAKE_CASE ( self : str ) -> str:
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("int64" ),
"query": datasets.Value("int64" ),
},
"prediction_text": datasets.Value("string" ),
},
"references": {
"idx": {
"passage": datasets.Value("int64" ),
"query": datasets.Value("int64" ),
},
"answers": datasets.Sequence(datasets.Value("string" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("int64" ),
"paragraph": datasets.Value("int64" ),
"question": datasets.Value("int64" ),
},
"prediction": datasets.Value("int64" ),
},
"references": datasets.Value("int64" ),
}
else:
return {
"predictions": datasets.Value("int64" ),
"references": datasets.Value("int64" ),
}
def __SCREAMING_SNAKE_CASE ( self : Any , __a : Dict , __a : Dict ) -> List[Any]:
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(__a , __a )}
elif self.config_name == "cb":
return acc_and_fa(__a , __a , fa_avg="macro" )
elif self.config_name == "record":
_UpperCamelCase : Optional[Any] = [
{
"qas": [
{"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
for ref in references
]
}
]
_UpperCamelCase : Optional[int] = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
return evaluate_record(__a , __a )[0]
elif self.config_name == "multirc":
return evaluate_multirc(__a , __a )
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
return {"accuracy": simple_accuracy(__a , __a )}
else:
raise KeyError(
"You should supply a configuration name selected in "
"[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]" )
| 714
|
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def lowercase__ ( lowercase_ = 1_000_000 ,lowercase_ = 10 ) -> int:
"""simple docstring"""
_UpperCamelCase : defaultdict = defaultdict(lowercase_ )
for outer_width in range(3 ,(t_limit // 4) + 2 ):
if outer_width * outer_width > t_limit:
_UpperCamelCase : Any = max(
ceil(sqrt(outer_width * outer_width - t_limit ) ) ,1 )
else:
_UpperCamelCase : str = 1
hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
for hole_width in range(lowercase_ ,outer_width - 1 ,2 ):
count[outer_width * outer_width - hole_width * hole_width] += 1
return sum(1 for n in count.values() if 1 <= n <= 10 )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 51
| 0
|
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
lowerCamelCase__ = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Dict = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
SCREAMING_SNAKE_CASE__ :List[str] = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
SCREAMING_SNAKE_CASE__ :Optional[int] = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
SCREAMING_SNAKE_CASE__ :int = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
@require_torch
def __SCREAMING_SNAKE_CASE ( self : int ) -> Any:
_UpperCamelCase : Tuple = pipeline(
task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="pt" )
_UpperCamelCase : str = text_classifier("This is great !" )
self.assertEqual(nested_simplify(__a ) , [{"label": "LABEL_0", "score": 0.5_04}] )
_UpperCamelCase : Union[str, Any] = text_classifier("This is great !" , top_k=2 )
self.assertEqual(
nested_simplify(__a ) , [{"label": "LABEL_0", "score": 0.5_04}, {"label": "LABEL_1", "score": 0.4_96}] )
_UpperCamelCase : int = text_classifier(["This is great !", "This is bad"] , top_k=2 )
self.assertEqual(
nested_simplify(__a ) , [
[{"label": "LABEL_0", "score": 0.5_04}, {"label": "LABEL_1", "score": 0.4_96}],
[{"label": "LABEL_0", "score": 0.5_04}, {"label": "LABEL_1", "score": 0.4_96}],
] , )
_UpperCamelCase : Tuple = text_classifier("This is great !" , top_k=1 )
self.assertEqual(nested_simplify(__a ) , [{"label": "LABEL_0", "score": 0.5_04}] )
# Legacy behavior
_UpperCamelCase : Any = text_classifier("This is great !" , return_all_scores=__a )
self.assertEqual(nested_simplify(__a ) , [{"label": "LABEL_0", "score": 0.5_04}] )
_UpperCamelCase : int = text_classifier("This is great !" , return_all_scores=__a )
self.assertEqual(
nested_simplify(__a ) , [[{"label": "LABEL_0", "score": 0.5_04}, {"label": "LABEL_1", "score": 0.4_96}]] )
_UpperCamelCase : List[str] = text_classifier(["This is great !", "Something else"] , return_all_scores=__a )
self.assertEqual(
nested_simplify(__a ) , [
[{"label": "LABEL_0", "score": 0.5_04}, {"label": "LABEL_1", "score": 0.4_96}],
[{"label": "LABEL_0", "score": 0.5_04}, {"label": "LABEL_1", "score": 0.4_96}],
] , )
_UpperCamelCase : Optional[Any] = text_classifier(["This is great !", "Something else"] , return_all_scores=__a )
self.assertEqual(
nested_simplify(__a ) , [
{"label": "LABEL_0", "score": 0.5_04},
{"label": "LABEL_0", "score": 0.5_04},
] , )
@require_torch
def __SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
import torch
_UpperCamelCase : List[str] = pipeline(
task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="pt" , device=torch.device("cpu" ) , )
_UpperCamelCase : Tuple = text_classifier("This is great !" )
self.assertEqual(nested_simplify(__a ) , [{"label": "LABEL_0", "score": 0.5_04}] )
@require_tf
def __SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]:
_UpperCamelCase : List[str] = pipeline(
task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="tf" )
_UpperCamelCase : Union[str, Any] = text_classifier("This is great !" )
self.assertEqual(nested_simplify(__a ) , [{"label": "LABEL_0", "score": 0.5_04}] )
@slow
@require_torch
def __SCREAMING_SNAKE_CASE ( self : Any ) -> str:
_UpperCamelCase : str = pipeline("text-classification" )
_UpperCamelCase : List[Any] = text_classifier("This is great !" )
self.assertEqual(nested_simplify(__a ) , [{"label": "POSITIVE", "score": 1.0}] )
_UpperCamelCase : Optional[Any] = text_classifier("This is bad !" )
self.assertEqual(nested_simplify(__a ) , [{"label": "NEGATIVE", "score": 1.0}] )
_UpperCamelCase : Any = text_classifier("Birds are a type of animal" )
self.assertEqual(nested_simplify(__a ) , [{"label": "POSITIVE", "score": 0.9_88}] )
@slow
@require_tf
def __SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
_UpperCamelCase : Tuple = pipeline("text-classification" , framework="tf" )
_UpperCamelCase : int = text_classifier("This is great !" )
self.assertEqual(nested_simplify(__a ) , [{"label": "POSITIVE", "score": 1.0}] )
_UpperCamelCase : str = text_classifier("This is bad !" )
self.assertEqual(nested_simplify(__a ) , [{"label": "NEGATIVE", "score": 1.0}] )
_UpperCamelCase : Tuple = text_classifier("Birds are a type of animal" )
self.assertEqual(nested_simplify(__a ) , [{"label": "POSITIVE", "score": 0.9_88}] )
    def __SCREAMING_SNAKE_CASE ( self : Any , model : Any , tokenizer : str , processor : Optional[Any] ) -> int:
        _UpperCamelCase : Optional[int] = TextClassificationPipeline(model=model , tokenizer=tokenizer )
return text_classifier, ["HuggingFace is in", "This is another test"]
    def __SCREAMING_SNAKE_CASE ( self : Dict , text_classifier : Tuple , _ : str ) -> Tuple:
_UpperCamelCase : List[Any] = text_classifier.model
# Small inputs because BartTokenizer tiny has maximum position embeddings = 22
_UpperCamelCase : List[str] = "HuggingFace is in"
        _UpperCamelCase : Tuple = text_classifier(valid_inputs )
self.assertEqual(nested_simplify(__a ) , [{"label": ANY(__a ), "score": ANY(__a )}] )
self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() )
_UpperCamelCase : Dict = ["HuggingFace is in ", "Paris is in France"]
        _UpperCamelCase : List[Any] = text_classifier(valid_inputs )
self.assertEqual(
nested_simplify(__a ) , [{"label": ANY(__a ), "score": ANY(__a )}, {"label": ANY(__a ), "score": ANY(__a )}] , )
self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() )
self.assertTrue(outputs[1]["label"] in model.config.idalabel.values() )
# Forcing to get all results with `top_k=None`
# This is NOT the legacy format
        _UpperCamelCase : List[Any] = text_classifier(valid_inputs , top_k=None )
_UpperCamelCase : Optional[Any] = len(model.config.idalabel.values() )
self.assertEqual(
nested_simplify(__a ) , [[{"label": ANY(__a ), "score": ANY(__a )}] * N, [{"label": ANY(__a ), "score": ANY(__a )}] * N] , )
_UpperCamelCase : List[str] = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
        _UpperCamelCase : List[str] = text_classifier(valid_inputs )
self.assertEqual(
nested_simplify(__a ) , {"label": ANY(__a ), "score": ANY(__a )} , )
self.assertTrue(outputs["label"] in model.config.idalabel.values() )
# This might be used a text pair, but tokenizer + pipe interaction
# makes it hard to understand that it's not using the pair properly
# https://github.com/huggingface/transformers/issues/17305
# We disabled this usage instead as it was outputting wrong outputs.
_UpperCamelCase : Optional[int] = [["HuggingFace is in ", "Paris is in France"]]
        with self.assertRaises(ValueError ):
            text_classifier(valid_inputs )
# This used to be valid for doing text pairs
# We're keeping it working because of backward compatibility
_UpperCamelCase : Optional[Any] = text_classifier([[["HuggingFace is in ", "Paris is in France"]]] )
self.assertEqual(
nested_simplify(__a ) , [{"label": ANY(__a ), "score": ANY(__a )}] , )
self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() )
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
lowerCamelCase__ = TypeVar("KEY")
lowerCamelCase__ = TypeVar("VAL")
@dataclass(frozen=_UpperCamelCase , slots=_UpperCamelCase )
class __SCREAMING_SNAKE_CASE ( Generic[KEY, VAL] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :KEY
SCREAMING_SNAKE_CASE__ :VAL
class __SCREAMING_SNAKE_CASE ( _Item ):
'''simple docstring'''
def __init__( self : List[str] ) -> None:
        super().__init__(None , None )
def __bool__( self : Dict ) -> bool:
return False
lowerCamelCase__ = _DeletedItem()
class __SCREAMING_SNAKE_CASE ( MutableMapping[KEY, VAL] ):
'''simple docstring'''
    def __init__( self : int , initial_block_size : int = 8 , capacity_factor : float = 0.75 ) -> None:
_UpperCamelCase : str = initial_block_size
_UpperCamelCase : list[_Item | None] = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
_UpperCamelCase : List[str] = capacity_factor
_UpperCamelCase : Dict = 0
def __SCREAMING_SNAKE_CASE ( self : int , __a : KEY ) -> int:
return hash(__a ) % len(self._buckets )
    def __SCREAMING_SNAKE_CASE ( self : List[Any] , ind : int ) -> int:
return (ind + 1) % len(self._buckets )
    def __SCREAMING_SNAKE_CASE ( self : Tuple , ind : int , key : KEY , val : VAL ) -> bool:
        _UpperCamelCase : List[Any] = self._buckets[ind]
        if not stored:
            _UpperCamelCase : Tuple = _Item(key , val )
            self._len += 1
            return True
        elif stored.key == key:
            _UpperCamelCase : Union[str, Any] = _Item(key , val )
            return True
        else:
            return False
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> bool:
_UpperCamelCase : Any = len(self._buckets ) * self._capacity_factor
        return len(self ) >= int(limit )
def __SCREAMING_SNAKE_CASE ( self : str ) -> bool:
if len(self._buckets ) <= self._initial_block_size:
return False
_UpperCamelCase : List[str] = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
    def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , new_size : int ) -> None:
_UpperCamelCase : Any = self._buckets
_UpperCamelCase : List[Any] = [None] * new_size
_UpperCamelCase : List[str] = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def __SCREAMING_SNAKE_CASE ( self : int ) -> None:
self._resize(len(self._buckets ) * 2 )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> None:
self._resize(len(self._buckets ) // 2 )
    def __SCREAMING_SNAKE_CASE ( self : List[Any] , key : KEY ) -> Iterator[int]:
        _UpperCamelCase : str = self._get_bucket_index(key )
        for _ in range(len(self._buckets ) ):
            yield ind
            _UpperCamelCase : Tuple = self._get_next_ind(ind )
    def __SCREAMING_SNAKE_CASE ( self : Optional[int] , key : KEY , val : VAL ) -> None:
        for ind in self._iterate_buckets(key ):
            if self._try_set(ind , key , val ):
                break
    def __setitem__( self : int , key : KEY , val : VAL ) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key , val )
    def __delitem__( self : str , key : KEY ) -> None:
        for ind in self._iterate_buckets(key ):
            _UpperCamelCase : Tuple = self._buckets[ind]
            if item is None:
                raise KeyError(key )
if item is _deleted:
continue
if item.key == key:
_UpperCamelCase : List[Any] = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
    def __getitem__( self : str , key : KEY ) -> VAL:
        for ind in self._iterate_buckets(key ):
_UpperCamelCase : Tuple = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
        raise KeyError(key )
def __len__( self : List[Any] ) -> int:
return self._len
def __iter__( self : List[str] ) -> Iterator[KEY]:
yield from (item.key for item in self._buckets if item)
def __repr__( self : List[str] ) -> str:
_UpperCamelCase : Optional[int] = " ,".join(
F'''{item.key}: {item.val}''' for item in self._buckets if item )
return F'''HashMap({val_string})'''
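# Standalone sketch of the linear-probing scheme the map above relies on
# (hypothetical helper, illustrative only): start at hash(key) % n and step to
# the next bucket, wrapping around, until a free or matching slot is found.
def _probe_sequence(key, n_buckets):
    ind = hash(key) % n_buckets
    for _ in range(n_buckets):
        yield ind
        ind = (ind + 1) % n_buckets

# Every bucket is visited exactly once before the probe sequence repeats.
assert len(set(_probe_sequence("spam", 8))) == 8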
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
lowerCamelCase__ = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
lowerCamelCase__ = [ord(letter) for letter in string.ascii_lowercase]
lowerCamelCase__ = {ord(char) for char in VALID_CHARS}
lowerCamelCase__ = ["the", "be", "to", "of", "and", "in", "that", "have"]
def lowercase__ ( ciphertext ,key ) -> str | None:
    """simple docstring"""
    _UpperCamelCase : str = ""
    _UpperCamelCase : int
    _UpperCamelCase : int
    _UpperCamelCase : int
    for keychar, cipherchar in zip(cycle(key ) ,ciphertext ):
        _UpperCamelCase : Dict = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar )
    return decoded
def lowercase__ ( ciphertext ) -> list[str]:
    """simple docstring"""
    _UpperCamelCase : list[str] = []
    for key in product(LOWERCASE_INTS ,repeat=3 ):
        _UpperCamelCase : int = try_key(ciphertext ,key )
        if encoded is not None:
            possibles.append(encoded )
return possibles
def lowercase__ ( possibles ,common_word ) -> list[str]:
    """simple docstring"""
    return [possible for possible in possibles if common_word in possible.lower()]
def lowercase__ ( lowercase_ = "p059_cipher.txt" ) -> int:
"""simple docstring"""
_UpperCamelCase : list[int]
_UpperCamelCase : list[str]
_UpperCamelCase : str
_UpperCamelCase : str
    _UpperCamelCase : str = Path(__file__ ).parent.joinpath(lowercase_ ).read_text(encoding="utf-8" )
    _UpperCamelCase : Optional[Any] = [int(number ) for number in data.strip().split("," )]
    _UpperCamelCase : List[str] = filter_valid_chars(ciphertext )
for common_word in COMMON_WORDS:
        _UpperCamelCase : Union[str, Any] = filter_common_word(possibles ,common_word )
        if len(possibles ) == 1:
break
_UpperCamelCase : Union[str, Any] = possibles[0]
    return sum(ord(char ) for char in decoded_text )
if __name__ == "__main__":
print(f"""{solution() = }""")
"""simple docstring"""
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
    def __init__( self : Optional[Any] , array : list[int] ) -> None:
        _UpperCamelCase : Tuple = len(array )
        _UpperCamelCase : Dict = [0] * len_array
        if len_array > 0:
            _UpperCamelCase : Optional[Any] = array[0]
        for i in range(1 , len_array ):
            _UpperCamelCase : Tuple = self.prefix_sum[i - 1] + array[i]
    def __SCREAMING_SNAKE_CASE ( self : Dict , start : int , end : int ) -> int:
if start == 0:
return self.prefix_sum[end]
return self.prefix_sum[end] - self.prefix_sum[start - 1]
    def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , target_sum : int ) -> bool:
_UpperCamelCase : int = {0}
for sum_item in self.prefix_sum:
if sum_item - target_sum in sums:
return True
            sums.add(sum_item )
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
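# Standalone sketch of the two ideas above (hypothetical values): range sums
# via prefix sums, and detecting a subarray with a target sum by asking
# whether prefix[j] - target was already seen among earlier prefix sums.
from itertools import accumulate as _accumulate

_prefix = list(_accumulate([1, 2, 3, 4]))  # [1, 3, 6, 10]
assert _prefix[3] - _prefix[0] == 9        # sum of the slice [2, 3, 4]
_seen = {0}
_found = False
for _s in _prefix:
    _found = _found or (_s - 7) in _seen   # subarray [3, 4] sums to 7
    _seen.add(_s)
assert _found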
"""simple docstring"""
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def lowercase__ ( lowercase_=None ) -> Any:
"""simple docstring"""
if subparsers is not None:
_UpperCamelCase : Dict = subparsers.add_parser("test" )
else:
_UpperCamelCase : str = argparse.ArgumentParser("Accelerate test command" )
parser.add_argument(
"--config_file" ,default=lowercase_ ,help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
) ,)
if subparsers is not None:
parser.set_defaults(func=lowercase_ )
return parser
def lowercase__ ( lowercase_ ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase : Tuple = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["test_utils", "scripts", "test_script.py"] )
if args.config_file is None:
_UpperCamelCase : int = script_name
else:
_UpperCamelCase : Dict = F'''--config_file={args.config_file} {script_name}'''
_UpperCamelCase : str = ["accelerate-launch"] + test_args.split()
    _UpperCamelCase : Tuple = execute_subprocess_async(cmd ,env=os.environ.copy() )
if result.returncode == 0:
print("Test is a success! You are ready for your distributed training!" )
def lowercase__ ( ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase : int = test_command_parser()
_UpperCamelCase : Tuple = parser.parse_args()
    test_command(args )
if __name__ == "__main__":
main()
"""simple docstring"""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def lowercase__ ( token ,num_runs=7 ) -> Tuple:
"""simple docstring"""
_UpperCamelCase : Optional[int] = None
if token is not None:
_UpperCamelCase : Optional[Any] = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''}
# The id of a workflow (not of a workflow run)
_UpperCamelCase : Any = "636036"
_UpperCamelCase : Tuple = F'''https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs'''
# On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
url += F'''?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}'''
    _UpperCamelCase : Dict = requests.get(url ,headers=headers ).json()
return result["workflow_runs"]
def lowercase__ ( lowercase_ ) -> List[str]:
"""simple docstring"""
_UpperCamelCase : List[Any] = get_daily_ci_runs(lowercase_ )
_UpperCamelCase : Tuple = None
for workflow_run in workflow_runs:
if workflow_run["status"] == "completed":
_UpperCamelCase : Union[str, Any] = workflow_run["id"]
break
return workflow_run_id
def lowercase__ ( artifact_names ,output_dir ,token ) -> Optional[int]:
    """simple docstring"""
    _UpperCamelCase : str = get_last_daily_ci_runs(token )
    if workflow_run_id is not None:
        _UpperCamelCase : int = get_artifacts_links(worflow_run_id=workflow_run_id ,token=token )
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                _UpperCamelCase : Dict = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name ,artifact_url=artifact_url ,output_dir=output_dir ,token=token )
def lowercase__ ( artifact_names ,output_dir ,token ) -> int:
    """simple docstring"""
    get_last_daily_ci_artifacts(artifact_names ,output_dir ,token )
    _UpperCamelCase : Dict = {}
    for artifact_name in artifact_names:
        _UpperCamelCase : Union[str, Any] = os.path.join(output_dir ,F'''{artifact_name}.zip''' )
        if os.path.isfile(artifact_zip_path ):
            _UpperCamelCase : int = {}
            with zipfile.ZipFile(artifact_zip_path ) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename ):
                        # read the file
                        with z.open(filename ) as f:
                            _UpperCamelCase : int = f.read().decode("UTF-8" )
return results
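# Standalone sketch of the zip-reading pattern above, using an in-memory
# archive so the example is self-contained (file names are hypothetical).
import io

_buf = io.BytesIO()
with zipfile.ZipFile(_buf, "w") as _z:
    _z.writestr("failures.txt", "test_a FAILED")
with zipfile.ZipFile(_buf) as _z:
    _texts = {name: _z.read(name).decode("UTF-8") for name in _z.namelist()}
assert _texts["failures.txt"] == "test_a FAILED"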
"""simple docstring"""
def lowercase__ ( digit ) -> int:
"""simple docstring"""
return 1 if digit in (0, 1) else (digit * factorial(digit - 1 ))
def lowercase__ ( number ) -> bool:
"""simple docstring"""
_UpperCamelCase : str = 0
_UpperCamelCase : str = number
while duplicate > 0:
        _UpperCamelCase : int = divmod(duplicate ,10 )
        fact_sum += factorial(digit )
return fact_sum == number
if __name__ == "__main__":
print("Program to check whether a number is a Krisnamurthy Number or not.")
lowerCamelCase__ = int(input("Enter number: ").strip())
print(
f"""{number} is {"" if krishnamurthy(number) else "not "}a Krishnamurthy Number."""
)
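# Quick standalone check of the property above (uses math.factorial instead of
# the helper defined in this file): 145 = 1! + 4! + 5!, so it qualifies.
from math import factorial as _factorial

assert sum(_factorial(int(d)) for d in "145") == 145
assert sum(_factorial(int(d)) for d in "123") != 123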
"""simple docstring"""
import math
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
    def __SCREAMING_SNAKE_CASE ( self : Any , weights : list[list[float]] , sample : list[int] ) -> int:
        # Squared Euclidean distance of the sample to each of the two weight vectors.
        _UpperCamelCase : List[Any] = 0.0
        _UpperCamelCase : Union[str, Any] = 0.0
        for i in range(len(sample ) ):
            d_a += math.pow((sample[i] - weights[0][i]) , 2 )
            d_b += math.pow((sample[i] - weights[1][i]) , 2 )
        return 0 if d_a > d_b else 1
    def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , weights : list[list[int | float]] , sample : list[int] , j : int , alpha : float ) -> list[list[int | float]]:
        # Move every component of the winning vector j towards the sample.
        for i in range(len(sample ) ):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
return weights
def lowercase__ ( ) -> None:
"""simple docstring"""
_UpperCamelCase : Optional[int] = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
# weight initialization ( n, C )
_UpperCamelCase : List[str] = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
# training
_UpperCamelCase : List[Any] = SelfOrganizingMap()
_UpperCamelCase : int = 3
_UpperCamelCase : List[Any] = 0.5
    for _ in range(epochs ):
        for j in range(len(training_samples ) ):
            # training sample
            _UpperCamelCase : int = training_samples[j]
            # Compute the winning vector
            _UpperCamelCase : Tuple = self_organizing_map.get_winner(weights ,sample )
            # Update the winning vector
            _UpperCamelCase : int = self_organizing_map.update(weights ,sample ,winner ,alpha )
    # classify test sample
    _UpperCamelCase : Optional[int] = [0, 0, 0, 1]
    _UpperCamelCase : Dict = self_organizing_map.get_winner(weights ,sample )
# results
print(F'''Clusters that the test sample belongs to : {winner}''' )
print(F'''Weights that have been trained : {weights}''' )
# running the main() function
if __name__ == "__main__":
main()
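# Standalone numeric sketch of the winner rule above (values mirror the
# hypothetical ones used in main): the sample joins whichever weight vector
# is closer in squared Euclidean distance.
_sample = [0, 0, 0, 1]
_w = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
_d = [sum((s - wi) ** 2 for s, wi in zip(_sample, row)) for row in _w]
assert _d[0] < _d[1]  # 0.66 < 1.78, so cluster 0 wins for this sample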
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all LED models at https://huggingface.co/models?filter=LED
lowerCamelCase__ = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
lowerCamelCase__ = {
"allenai/led-base-16384": 1_6384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def lowercase__ ( ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase : Union[str, Any] = (
list(range(ord("!" ) ,ord("~" ) + 1 ) ) + list(range(ord("¡" ) ,ord("¬" ) + 1 ) ) + list(range(ord("®" ) ,ord("ÿ" ) + 1 ) )
)
_UpperCamelCase : List[str] = bs[:]
_UpperCamelCase : str = 0
for b in range(2**8 ):
if b not in bs:
            bs.append(b )
cs.append(2**8 + n )
n += 1
    _UpperCamelCase : str = [chr(n ) for n in cs]
return dict(zip(lowercase_ ,lowercase_ ) )
def lowercase__ ( word ) -> str:
"""simple docstring"""
_UpperCamelCase : Dict = set()
_UpperCamelCase : Optional[Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_UpperCamelCase : Dict = char
return pairs
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Optional[int] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ :Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ :Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ :Optional[int] = ["input_ids", "attention_mask"]
    def __init__( self : Optional[Any] , vocab_file : str , merges_file : str , errors : str="replace" , bos_token : str="<s>" , eos_token : str="</s>" , sep_token : str="</s>" , cls_token : str="<s>" , unk_token : str="<unk>" , pad_token : str="<pad>" , mask_token : str="<mask>" , add_prefix_space : bool=False , **kwargs : Any , ) -> None:
        _UpperCamelCase : Tuple = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        _UpperCamelCase : List[str] = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        _UpperCamelCase : Tuple = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        _UpperCamelCase : str = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        _UpperCamelCase : Any = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        _UpperCamelCase : List[Any] = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behaves like a normal word, i.e. include the space before it
        _UpperCamelCase : int = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            errors=errors , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , **kwargs , )
        with open(vocab_file , encoding="utf-8" ) as vocab_handle:
            _UpperCamelCase : int = json.load(vocab_handle )
        _UpperCamelCase : str = {v: k for k, v in self.encoder.items()}
        _UpperCamelCase : int = errors  # how to handle errors in decoding
        _UpperCamelCase : str = bytes_to_unicode()
        _UpperCamelCase : Optional[Any] = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file , encoding="utf-8" ) as merges_handle:
            _UpperCamelCase : List[Any] = merges_handle.read().split("\n" )[1:-1]
        _UpperCamelCase : Optional[int] = [tuple(merge.split() ) for merge in bpe_merges]
        _UpperCamelCase : List[Any] = dict(zip(bpe_merges , range(len(bpe_merges ) ) ) )
_UpperCamelCase : Any = {}
_UpperCamelCase : List[Any] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
_UpperCamelCase : Optional[Any] = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
return len(self.encoder )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple:
return dict(self.encoder , **self.added_tokens_encoder )
    def __SCREAMING_SNAKE_CASE ( self : List[Any] , token : Optional[Any] ) -> Tuple:
        if token in self.cache:
            return self.cache[token]
        _UpperCamelCase : Optional[int] = tuple(token )
        _UpperCamelCase : List[Any] = get_pairs(word )
        if not pairs:
            return token
        while True:
            _UpperCamelCase : List[str] = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
_UpperCamelCase : Union[str, Any] = bigram
_UpperCamelCase : Dict = []
_UpperCamelCase : Optional[Any] = 0
            while i < len(word ):
try:
                    _UpperCamelCase : Dict = word.index(first , i )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_UpperCamelCase : str = j
if word[i] == first and i < len(__a ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_UpperCamelCase : str = tuple(__a )
_UpperCamelCase : Optional[int] = new_word
            if len(word ) == 1:
                break
            else:
                _UpperCamelCase : Any = get_pairs(word )
        _UpperCamelCase : str = " ".join(word )
_UpperCamelCase : Dict = word
return word
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : List[str] ) -> Tuple:
_UpperCamelCase : str = []
for token in re.findall(self.pat , __a ):
_UpperCamelCase : List[str] = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(" " ) )
return bpe_tokens
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : int ) -> Tuple:
return self.encoder.get(__a , self.encoder.get(self.unk_token ) )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : Dict ) -> Union[str, Any]:
return self.decoder.get(__a )
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : Tuple ) -> str:
_UpperCamelCase : Tuple = "".join(__a )
_UpperCamelCase : List[Any] = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
    def __SCREAMING_SNAKE_CASE ( self : Any , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        _UpperCamelCase : Tuple = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        _UpperCamelCase : Dict = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
        with open(vocab_file , "w" , encoding="utf-8" ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + "\n" )
        _UpperCamelCase : str = 0
        with open(merge_file , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
" Please check that the tokenizer is not corrupted!" )
_UpperCamelCase : Union[str, Any] = token_index
writer.write(" ".join(__a ) + "\n" )
index += 1
return vocab_file, merge_file
    def __SCREAMING_SNAKE_CASE ( self : int , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ) -> List[int]:
        if token_ids_b is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        _UpperCamelCase : Tuple = [self.cls_token_id]
        _UpperCamelCase : List[Any] = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_b + sep
    def __SCREAMING_SNAKE_CASE ( self : List[Any] , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None , already_has_special_tokens : bool = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a , token_ids_b , already_has_special_tokens=True )
        if token_ids_b is None:
            return [1] + ([0] * len(token_ids_a )) + [1]
        return [1] + ([0] * len(token_ids_a )) + [1, 1] + ([0] * len(token_ids_b )) + [1]
    def __SCREAMING_SNAKE_CASE ( self : str , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ) -> List[int]:
        _UpperCamelCase : Optional[int] = [self.sep_token_id]
        _UpperCamelCase : List[Any] = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
    def __SCREAMING_SNAKE_CASE ( self : Any , text : str , is_split_into_words : bool=False , **kwargs : List[Any] ) -> List[Any]:
        _UpperCamelCase : Optional[Any] = kwargs.pop("add_prefix_space" , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            _UpperCamelCase : List[Any] = " " + text
return (text, kwargs)
    def __SCREAMING_SNAKE_CASE ( self : Dict , encoded_inputs : Union[Dict[str, EncodedInput], BatchEncoding] , max_length : Optional[int] = None , padding_strategy : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , pad_to_multiple_of : Optional[int] = None , return_attention_mask : Optional[bool] = None , ) -> dict:
        _UpperCamelCase : Any = super()._pad(
            encoded_inputs=encoded_inputs , max_length=max_length , padding_strategy=padding_strategy , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , )
# Load from model defaults
if return_attention_mask is None:
_UpperCamelCase : str = "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
_UpperCamelCase : Dict = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as other (sequential) inputs.
            _UpperCamelCase : str = len(encoded_inputs["global_attention_mask"] ) != len(required_input )
if needs_to_be_padded:
                _UpperCamelCase : List[str] = len(required_input ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
_UpperCamelCase : str = (
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
_UpperCamelCase : List[Any] = [-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return encoded_inputs
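# Standalone sketch of the global-attention padding rule above (hypothetical
# values): real positions keep their 0/1 flags, padded positions get -1, so
# "local attention" (0) stays distinct from "do not attend" (-1).
_global_mask = [1, 0, 0]
assert _global_mask + [-1] * 2 == [1, 0, 0, -1, -1]   # padding_side == "right"
assert [-1] * 2 + _global_mask == [-1, -1, 1, 0, 0]   # padding_side == "left"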
"""simple docstring"""
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
lowerCamelCase__ = "src/transformers"
lowerCamelCase__ = "docs/source/en"
lowerCamelCase__ = "."
def lowercase__ ( filename ,start_prompt ,end_prompt ) -> List[str]:
    """simple docstring"""
    with open(filename ,"r" ,encoding="utf-8" ,newline="\n" ) as f:
        _UpperCamelCase : Union[str, Any] = f.readlines()
    # Find the start prompt.
    _UpperCamelCase : Dict = 0
    while not lines[start_index].startswith(start_prompt ):
        start_index += 1
    start_index += 1
    _UpperCamelCase : Optional[int] = start_index
    while not lines[end_index].startswith(end_prompt ):
        end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
lowerCamelCase__ = "Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names.
lowerCamelCase__ = re.compile(R"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
lowerCamelCase__ = re.compile(R"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
lowerCamelCase__ = re.compile(R"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# This is to make sure the transformers module imported is the one in the repo.
lowerCamelCase__ = direct_transformers_import(TRANSFORMERS_PATH)
def lowercase__ ( lowercase_ ) -> Any:
"""simple docstring"""
_UpperCamelCase : Tuple = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)" ,lowercase_ )
return [m.group(0 ) for m in matches]
def lowercase__ ( text ,width ) -> Union[str, Any]:
    """simple docstring"""
    _UpperCamelCase : str = 2 if text == "✅" or text == "❌" else len(text )
_UpperCamelCase : Union[str, Any] = (width - text_length) // 2
_UpperCamelCase : Dict = width - text_length - left_indent
return " " * left_indent + text + " " * right_indent
def lowercase__ ( ) -> str:
"""simple docstring"""
_UpperCamelCase : Optional[Any] = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
_UpperCamelCase : str = {
name: config_maping_names[code]
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if code in config_maping_names
}
_UpperCamelCase : Dict = {name: config.replace("Config" ,"" ) for name, config in model_name_to_config.items()}
# Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    _UpperCamelCase : int = collections.defaultdict(bool )
    _UpperCamelCase : Dict = collections.defaultdict(bool )
    _UpperCamelCase : Dict = collections.defaultdict(bool )
    _UpperCamelCase : int = collections.defaultdict(bool )
    _UpperCamelCase : str = collections.defaultdict(bool )
# Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module ):
_UpperCamelCase : List[str] = None
if attr_name.endswith("Tokenizer" ):
_UpperCamelCase : Tuple = slow_tokenizers
_UpperCamelCase : Any = attr_name[:-9]
elif attr_name.endswith("TokenizerFast" ):
_UpperCamelCase : Optional[Any] = fast_tokenizers
_UpperCamelCase : List[str] = attr_name[:-13]
        elif _re_tf_models.match(attr_name ) is not None:
            _UpperCamelCase : List[Any] = tf_models
            _UpperCamelCase : Dict = _re_tf_models.match(attr_name ).groups()[0]
        elif _re_flax_models.match(attr_name ) is not None:
            _UpperCamelCase : Dict = flax_models
            _UpperCamelCase : Union[str, Any] = _re_flax_models.match(attr_name ).groups()[0]
        elif _re_pt_models.match(attr_name ) is not None:
            _UpperCamelCase : Optional[int] = pt_models
            _UpperCamelCase : Any = _re_pt_models.match(attr_name ).groups()[0]
        if lookup_dict is not None:
            while len(attr_name ) > 0:
                if attr_name in model_name_to_prefix.values():
                    _UpperCamelCase : Dict = True
                    break
                # Try again after removing the last word in the name
                _UpperCamelCase : List[str] = "".join(camel_case_split(attr_name )[:-1] )
# Let's build that table!
_UpperCamelCase : Any = list(model_name_to_config.keys() )
model_names.sort(key=str.lower )
_UpperCamelCase : List[str] = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
# We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    _UpperCamelCase : Union[str, Any] = [len(c ) + 2 for c in columns]
    _UpperCamelCase : Any = max([len(name ) for name in model_names] ) + 2
# Build the table per se
_UpperCamelCase : Tuple = "|" + "|".join([_center_text(lowercase_ ,lowercase_ ) for c, w in zip(lowercase_ ,lowercase_ )] ) + "|\n"
# Use ":-----:" format to center-aligned table cell texts
table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths] ) + "|\n"
_UpperCamelCase : Union[str, Any] = {True: "✅", False: "❌"}
for name in model_names:
_UpperCamelCase : Optional[int] = model_name_to_prefix[name]
_UpperCamelCase : Tuple = [
name,
check[slow_tokenizers[prefix]],
check[fast_tokenizers[prefix]],
check[pt_models[prefix]],
check[tf_models[prefix]],
check[flax_models[prefix]],
]
table += "|" + "|".join([_center_text(lowercase_ ,lowercase_ ) for l, w in zip(lowercase_ ,lowercase_ )] ) + "|\n"
return table
def lowercase__ ( lowercase_=False ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : str = _find_text_in_file(
filename=os.path.join(lowercase_ ,"index.md" ) ,start_prompt="<!--This table is updated automatically from the auto modules" ,end_prompt="<!-- End table-->" ,)
_UpperCamelCase : Any = get_model_table_from_auto_modules()
if current_table != new_table:
if overwrite:
with open(os.path.join(lowercase_ ,"index.md" ) ,"w" ,encoding="utf-8" ,newline="\n" ) as f:
f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
else:
raise ValueError(
"The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this." )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
lowerCamelCase__ = parser.parse_args()
check_model_table(args.fix_and_overwrite)
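# Standalone sketch of the camel-case splitting used above (example value is
# hypothetical): the regex cuts at lower->upper boundaries and before the last
# capital of an acronym, e.g. "GPTNeoXModel" -> GPT / Neo / X / Model.
_parts = [
    m.group(0)
    for m in re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", "GPTNeoXModel")
]
assert _parts == ["GPT", "Neo", "X", "Model"]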
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
lowerCamelCase__ = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
lowerCamelCase__ = 12_8022
lowerCamelCase__ = 12_8028
@require_sentencepiece
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Any = MaMaaaTokenizer
SCREAMING_SNAKE_CASE__ :Optional[Any] = False
SCREAMING_SNAKE_CASE__ :Tuple = False
SCREAMING_SNAKE_CASE__ :str = True
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
super().setUp()
_UpperCamelCase : List[Any] = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
_UpperCamelCase : Dict = dict(zip(__a , range(len(__a ) ) ) )
_UpperCamelCase : List[str] = Path(self.tmpdirname )
save_json(__a , save_dir / VOCAB_FILES_NAMES["vocab_file"] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(__a , save_dir / VOCAB_FILES_NAMES["spm_file"] )
_UpperCamelCase : Tuple = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def __SCREAMING_SNAKE_CASE ( self : Dict , **__a : Dict ) -> Optional[int]:
return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **__a )
def __SCREAMING_SNAKE_CASE ( self : str , __a : Union[str, Any] ) -> int:
return (
"This is a test",
"This is a test",
)
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]:
_UpperCamelCase : List[str] = "</s>"
_UpperCamelCase : List[Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
_UpperCamelCase : Tuple = self.get_tokenizer()
_UpperCamelCase : str = list(tokenizer.get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "</s>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "<s>" )
self.assertEqual(len(__a ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip("Skip this test while all models are still to be uploaded." )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
pass
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
_UpperCamelCase : Optional[int] = self.get_tokenizer()
_UpperCamelCase : Optional[int] = tokenizer.tokenize("This is a test" )
self.assertListEqual(__a , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__a ) , [2, 3, 4, 5, 6] , )
_UpperCamelCase : Union[str, Any] = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
self.assertListEqual(__a , ["▁This", "▁is", "▁a", "▁t", "est"] )
_UpperCamelCase : Any = tokenizer.convert_tokens_to_string(__a )
self.assertEqual(__a , "This is a test" )
@slow
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple:
# fmt: off
_UpperCamelCase : List[Any] = {"input_ids": [[12_8022, 11_0108, 397, 11, 3_8272, 2247, 12_4811, 285, 1_8105, 1586, 207, 7, 3_9534, 4428, 397, 1019, 1_8105, 1586, 207, 7, 4_1337, 1_6786, 241, 7, 2_0214, 17, 12_5690, 1_0398, 7, 4_4378, 5_8069, 6_8342, 7798, 7343, 11, 299, 3_3310, 4, 158, 3_7350, 9_4077, 4569, 299, 3_3310, 90, 4, 5_2840, 290, 4, 3_1270, 112, 299, 682, 4, 5_2840, 3_9953, 1_4079, 193, 5_2519, 9_0894, 1_7894, 12_0697, 11, 4_0445, 551, 17, 1019, 5_2519, 9_0894, 1_7756, 963, 11, 4_0445, 480, 17, 9792, 1120, 5173, 1393, 6240, 1_6786, 241, 12_0996, 28, 1245, 1393, 11_8240, 1_1123, 1019, 9_3612, 2691, 1_0618, 9_8058, 12_0409, 1928, 279, 4, 4_0683, 367, 178, 207, 1019, 103, 10_3121, 506, 6_5296, 5, 2], [12_8022, 2_1217, 367, 117, 12_5450, 128, 719, 7, 7308, 40, 9_3612, 1_2669, 1116, 1_6704, 71, 1_7785, 3699, 1_5592, 35, 144, 9584, 241, 1_1943, 713, 950, 799, 2247, 8_8427, 150, 149, 11_8813, 12_0706, 1019, 10_6906, 8_1518, 28, 1224, 2_2799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [12_8022, 1658, 12_3311, 5155, 5578, 4722, 279, 1_4947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__a , model_name="facebook/m2m100_418M" , revision="c168bae485c864188cf9aa0e4108b0b6934dc91e" , )
@require_torch
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :str = "facebook/m2m100_418M"
SCREAMING_SNAKE_CASE__ :Union[str, Any] = [
"In my opinion, there are two levels of response from the French government.",
"NSA Affair Emphasizes Complete Lack of Debate on Intelligence",
]
SCREAMING_SNAKE_CASE__ :Optional[int] = [
"Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
"L'affaire NSA souligne l'absence totale de débat sur le renseignement",
]
# fmt: off
SCREAMING_SNAKE_CASE__ :int = [EN_CODE, 593, 1_949, 115_781, 4, 71_586, 4_234, 60_633, 126_233, 432, 123_808, 15_592, 1_197, 117_132, 120_618, 5, 2]
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : List[Any] ) -> str:
_UpperCamelCase : MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="en" , tgt_lang="fr" )
_UpperCamelCase : Union[str, Any] = 1
return cls
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]:
self.assertEqual(self.tokenizer.get_lang_id("ar" ) , 12_8006 )
self.assertEqual(self.tokenizer.get_lang_id("en" ) , 12_8022 )
self.assertEqual(self.tokenizer.get_lang_id("ro" ) , 12_8076 )
self.assertEqual(self.tokenizer.get_lang_id("mr" ) , 12_8063 )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
_UpperCamelCase : Union[str, Any] = self.tokenizer.get_vocab()
self.assertEqual(len(__a ) , self.tokenizer.vocab_size )
self.assertEqual(vocab["<unk>"] , 3 )
self.assertIn(self.tokenizer.get_lang_token("en" ) , __a )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str:
_UpperCamelCase : Union[str, Any] = "en"
_UpperCamelCase : Union[str, Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __a )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
        self.assertIn(FR_CODE , self.tokenizer.all_special_ids )
# fmt: off
_UpperCamelCase : Union[str, Any] = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 1_4028, 136, 3286, 9706, 6, 9_0797, 6, 14_4012, 162, 8_8128, 3_0061, 5, 2]
# fmt: on
_UpperCamelCase : Dict = self.tokenizer.decode(__a , skip_special_tokens=__a )
_UpperCamelCase : Dict = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__a )
self.assertEqual(__a , __a )
self.assertNotIn(self.tokenizer.eos_token , __a )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]:
_UpperCamelCase : Tuple = tempfile.mkdtemp()
_UpperCamelCase : List[str] = self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(__a )
_UpperCamelCase : int = MaMaaaTokenizer.from_pretrained(__a )
self.assertDictEqual(new_tok.lang_token_to_id , __a )
@require_torch
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
_UpperCamelCase : Optional[Any] = "en"
_UpperCamelCase : int = "fr"
        _UpperCamelCase : str = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=True , return_tensors="pt" )
_UpperCamelCase : Optional[int] = shift_tokens_right(
batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
for k in batch:
_UpperCamelCase : Any = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
_UpperCamelCase : Any = "mr"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
_UpperCamelCase : Optional[int] = "zh"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
@require_torch
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]:
_UpperCamelCase : Any = "mr"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
_UpperCamelCase : List[str] = "zh"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
_UpperCamelCase : Union[str, Any] = self.tokenizer._build_translation_inputs("A test" , return_tensors="pt" , src_lang="en" , tgt_lang="ar" )
self.assertEqual(
nested_simplify(__a ) , {
# en_XX, A, test, EOS
"input_ids": [[12_8022, 58, 4183, 2]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 12_8006,
} , )
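# Standalone sketch of what `shift_tokens_right` does above (values are
# hypothetical): labels are shifted one step right and the decoder start token
# is prepended, so the decoder learns to predict token t from tokens < t.
def _shift_right_demo(labels, decoder_start_id):
    return [[decoder_start_id] + row[:-1] for row in labels]

assert _shift_right_demo([[5, 6, 2]], decoder_start_id=2) == [[2, 5, 6]]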
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
lowerCamelCase__ = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
lowerCamelCase__ = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
lowerCamelCase__ = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def lowercase__ ( item ,main_target ) -> tuple[str, float]:
    """simple docstring"""
    _UpperCamelCase : str = len([g for position, g in enumerate(item ) if g == main_target[position]] )
    return (item, float(score ))
def lowercase__ ( parent_a ,parent_b ) -> tuple[str, str]:
    """simple docstring"""
    _UpperCamelCase : Tuple = random.randint(0 ,len(parent_a ) - 1 )
    _UpperCamelCase : Dict = parent_a[:random_slice] + parent_b[random_slice:]
    _UpperCamelCase : Tuple = parent_b[:random_slice] + parent_a[random_slice:]
    return (child_a, child_b)
def lowercase__ ( child ,genes ) -> str:
    """simple docstring"""
    _UpperCamelCase : int = list(child )
    if random.uniform(0 ,1 ) < MUTATION_PROBABILITY:
        _UpperCamelCase : int = random.choice(genes )
    return "".join(child_list )
def lowercase__ ( parent_a ,population_score ,genes ,) -> list[str]:
    """simple docstring"""
    _UpperCamelCase : Optional[Any] = []
    # Generate more children proportionally to the fitness score.
    _UpperCamelCase : List[str] = int(parent_a[1] * 100 ) + 1
    _UpperCamelCase : Union[str, Any] = 10 if child_n >= 10 else child_n
    for _ in range(child_n ):
        _UpperCamelCase : Dict = population_score[random.randint(0 ,N_SELECTED )][0]
        _UpperCamelCase, _UpperCamelCase : Dict = crossover(parent_a[0] ,parent_b )
        # Append new string to the population list.
        pop.append(mutate(child_a ,genes ) )
        pop.append(mutate(child_b ,genes ) )
    return pop
def lowercase__ ( target ,genes ,debug = True ) -> tuple[int, int, str]:
"""simple docstring"""
if N_POPULATION < N_SELECTED:
_UpperCamelCase : List[str] = F'''{N_POPULATION} must be bigger than {N_SELECTED}'''
raise ValueError(lowercase_ )
# Verify that the target contains no genes besides the ones inside genes variable.
_UpperCamelCase : int = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
_UpperCamelCase : int = F'''{not_in_genes_list} is not in genes list, evolution cannot converge'''
raise ValueError(lowercase_ )
# Generate random starting population.
_UpperCamelCase : Union[str, Any] = []
    for _ in range(N_POPULATION ):
        population.append("".join([random.choice(genes ) for i in range(len(target ) )] ) )
# Just some logs to know what the algorithms is doing.
_UpperCamelCase, _UpperCamelCase : str = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
        total_population += len(population )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
        _UpperCamelCase : int = [evaluate(item ,target ) for item in population]
# Check if there is a matching evolution.
        _UpperCamelCase : Optional[Any] = sorted(population_score ,key=lambda x : x[1] ,reverse=True )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F'''\nGeneration: {generation}'''
F'''\nTotal Population:{total_population}'''
F'''\nBest score: {population_score[0][1]}'''
F'''\nBest string: {population_score[0][0]}''' )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
_UpperCamelCase : str = population[: int(N_POPULATION / 3 )]
population.clear()
        population.extend(population_best )
# Normalize population score to be between 0 and 1.
        _UpperCamelCase : str = [
            (item, score / len(target )) for item, score in population_score
        ]
# This is selection
        for i in range(N_SELECTED ):
            population.extend(select(population_score[int(i )] ,population_score ,genes ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# a far fewer generations.
        if len(population ) > N_POPULATION:
break
if __name__ == "__main__":
lowerCamelCase__ = (
"This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
)
lowerCamelCase__ = list(
" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
"nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
)
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = basic(target_str, genes_list)
print(
f"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"""
)
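# Standalone sketch of the single-point crossover above (slice index is
# hypothetical): both children split at the same position and swap tails.
_p1, _p2 = "AAAA", "BBBB"
_cut = 2
assert (_p1[:_cut] + _p2[_cut:], _p2[:_cut] + _p1[_cut:]) == ("AABB", "BBAA")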
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Optional[int] = DDIMPipeline
SCREAMING_SNAKE_CASE__ :Optional[int] = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
SCREAMING_SNAKE_CASE__ :List[str] = PipelineTesterMixin.required_optional_params - {
"num_images_per_prompt",
"latents",
"callback",
"callback_steps",
}
SCREAMING_SNAKE_CASE__ :Union[str, Any] = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
SCREAMING_SNAKE_CASE__ :List[Any] = False
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
torch.manual_seed(0 )
_UpperCamelCase : List[Any] = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
_UpperCamelCase : List[Any] = DDIMScheduler()
_UpperCamelCase : Any = {"unet": unet, "scheduler": scheduler}
return components
    def __SCREAMING_SNAKE_CASE ( self : Optional[int] , device : Union[str, Any] , seed : int=0 ) -> Dict:
        if str(device ).startswith("mps" ):
            _UpperCamelCase : List[str] = torch.manual_seed(seed )
        else:
            _UpperCamelCase : Tuple = torch.Generator(device=device ).manual_seed(seed )
_UpperCamelCase : Tuple = {
"batch_size": 1,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]:
_UpperCamelCase : Union[str, Any] = "cpu"
_UpperCamelCase : Any = self.get_dummy_components()
_UpperCamelCase : Any = self.pipeline_class(**__a )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
_UpperCamelCase : List[str] = self.get_dummy_inputs(__a )
_UpperCamelCase : Tuple = pipe(**__a ).images
_UpperCamelCase : Union[str, Any] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 32, 32, 3) )
_UpperCamelCase : List[Any] = np.array(
[1.0_0_0e0_0, 5.7_1_7e-0_1, 4.7_1_7e-0_1, 1.0_0_0e0_0, 0.0_0_0e0_0, 1.0_0_0e0_0, 3.0_0_0e-0_4, 0.0_0_0e0_0, 9.0_0_0e-0_4] )
_UpperCamelCase : Dict = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__a , 1e-3 )
def __SCREAMING_SNAKE_CASE ( self : int ) -> str:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
super().test_save_load_local(expected_max_difference=3e-3 )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
_UpperCamelCase : List[Any] = "google/ddpm-cifar10-32"
_UpperCamelCase : Tuple = UNetaDModel.from_pretrained(__a )
_UpperCamelCase : str = DDIMScheduler()
_UpperCamelCase : Union[str, Any] = DDIMPipeline(unet=__a , scheduler=__a )
ddim.to(__a )
ddim.set_progress_bar_config(disable=__a )
_UpperCamelCase : Dict = torch.manual_seed(0 )
_UpperCamelCase : Union[str, Any] = ddim(generator=__a , eta=0.0 , output_type="numpy" ).images
_UpperCamelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_UpperCamelCase : Any = np.array([0.17_23, 0.16_17, 0.16_00, 0.16_26, 0.14_97, 0.15_13, 0.15_05, 0.14_42, 0.14_53] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
_UpperCamelCase : Any = "google/ddpm-ema-bedroom-256"
_UpperCamelCase : Optional[int] = UNetaDModel.from_pretrained(__a )
_UpperCamelCase : Optional[Any] = DDIMScheduler.from_pretrained(__a )
_UpperCamelCase : int = DDIMPipeline(unet=__a , scheduler=__a )
ddpm.to(__a )
ddpm.set_progress_bar_config(disable=__a )
_UpperCamelCase : int = torch.manual_seed(0 )
_UpperCamelCase : Tuple = ddpm(generator=__a , output_type="numpy" ).images
_UpperCamelCase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
_UpperCamelCase : int = np.array([0.00_60, 0.02_01, 0.03_44, 0.00_24, 0.00_18, 0.00_02, 0.00_22, 0.00_00, 0.00_69] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
"""simple docstring"""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = ["model.decoder.embed_positions.weights"]
def lowercase__ ( lowercase_ ) -> Optional[Any]:
"""simple docstring"""
if "emb" in name:
_UpperCamelCase : List[str] = name.replace("emb" ,"model.decoder.embed_tokens" )
if "transformer" in name:
_UpperCamelCase : Optional[int] = name.replace("transformer" ,"model.decoder" )
if "cross_attention" in name:
_UpperCamelCase : Optional[int] = name.replace("cross_attention" ,"encoder_attn" )
if "linear1" in name:
_UpperCamelCase : Optional[Any] = name.replace("linear1" ,"fc1" )
if "linear2" in name:
_UpperCamelCase : Union[str, Any] = name.replace("linear2" ,"fc2" )
if "norm1" in name:
_UpperCamelCase : Optional[Any] = name.replace("norm1" ,"self_attn_layer_norm" )
if "norm_cross" in name:
_UpperCamelCase : Dict = name.replace("norm_cross" ,"encoder_attn_layer_norm" )
if "norm2" in name:
_UpperCamelCase : Union[str, Any] = name.replace("norm2" ,"final_layer_norm" )
if "out_norm" in name:
_UpperCamelCase : Union[str, Any] = name.replace("out_norm" ,"model.decoder.layer_norm" )
if "linears" in name:
_UpperCamelCase : List[str] = name.replace("linears" ,"lm_heads" )
if "condition_provider.conditioners.description.output_proj" in name:
_UpperCamelCase : Any = name.replace("condition_provider.conditioners.description.output_proj" ,"enc_to_dec_proj" )
return name
def lowercase__ ( lowercase_ ,lowercase_ ) -> Tuple[Dict, Dict]:
"""simple docstring"""
_UpperCamelCase : str = list(state_dict.keys() )
_UpperCamelCase : Optional[Any] = {}
for key in keys:
_UpperCamelCase : Optional[int] = state_dict.pop(lowercase_ )
_UpperCamelCase : List[Any] = rename_keys(lowercase_ )
if "in_proj_weight" in key:
# split fused qkv proj
_UpperCamelCase : Tuple = val[:hidden_size, :]
_UpperCamelCase : Optional[Any] = val[hidden_size : 2 * hidden_size, :]
_UpperCamelCase : Optional[Any] = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
_UpperCamelCase : Optional[Any] = val
else:
_UpperCamelCase : List[str] = val
return state_dict, enc_dec_proj_state_dict
def lowercase__ ( lowercase_ ) -> MusicgenDecoderConfig:
"""simple docstring"""
if checkpoint == "small":
# default config values
_UpperCamelCase : List[Any] = 1_024
_UpperCamelCase : List[str] = 24
_UpperCamelCase : Any = 16
elif checkpoint == "medium":
_UpperCamelCase : Tuple = 1_536
_UpperCamelCase : Dict = 48
_UpperCamelCase : Tuple = 24
elif checkpoint == "large":
_UpperCamelCase : int = 2_048
_UpperCamelCase : Optional[int] = 48
_UpperCamelCase : Dict = 32
else:
raise ValueError(F'''Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.''' )
_UpperCamelCase : str = MusicgenDecoderConfig(
hidden_size=lowercase_ ,ffn_dim=hidden_size * 4 ,num_hidden_layers=lowercase_ ,num_attention_heads=lowercase_ ,)
return config
@torch.no_grad()
def lowercase__ ( lowercase_ ,lowercase_=None ,lowercase_=None ,lowercase_="cpu" ) -> List[str]:
"""simple docstring"""
_UpperCamelCase : str = MusicGen.get_pretrained(lowercase_ ,device=lowercase_ )
_UpperCamelCase : Union[str, Any] = decoder_config_from_checkpoint(lowercase_ )
_UpperCamelCase : Optional[int] = fairseq_model.lm.state_dict()
_UpperCamelCase, _UpperCamelCase : Optional[Any] = rename_state_dict(
lowercase_ ,hidden_size=decoder_config.hidden_size )
    _UpperCamelCase : Tuple = T5EncoderModel.from_pretrained("t5-base" )
_UpperCamelCase : Union[str, Any] = EncodecModel.from_pretrained("facebook/encodec_32khz" )
_UpperCamelCase : str = MusicgenForCausalLM(lowercase_ ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
_UpperCamelCase, _UpperCamelCase : str = decoder.load_state_dict(lowercase_ ,strict=lowercase_ )
for key in missing_keys.copy():
if key.startswith(("text_encoder", "audio_encoder") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(lowercase_ )
if len(lowercase_ ) > 0:
raise ValueError(F'''Missing key(s) in state_dict: {missing_keys}''' )
if len(lowercase_ ) > 0:
raise ValueError(F'''Unexpected key(s) in state_dict: {unexpected_keys}''' )
# init the composite model
_UpperCamelCase : str = MusicgenForConditionalGeneration(text_encoder=lowercase_ ,audio_encoder=lowercase_ ,decoder=lowercase_ )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(lowercase_ )
# check we can do a forward pass
_UpperCamelCase : List[str] = torch.arange(0 ,8 ,dtype=torch.long ).reshape(2 ,-1 )
_UpperCamelCase : Dict = input_ids.reshape(2 * 4 ,-1 )
with torch.no_grad():
_UpperCamelCase : Tuple = model(input_ids=lowercase_ ,decoder_input_ids=lowercase_ ).logits
if logits.shape != (8, 1, 2_048):
raise ValueError("Incorrect shape for logits" )
# now construct the processor
_UpperCamelCase : int = AutoTokenizer.from_pretrained("t5-base" )
_UpperCamelCase : str = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz" ,padding_side="left" )
_UpperCamelCase : Optional[int] = MusicgenProcessor(feature_extractor=lowercase_ ,tokenizer=lowercase_ )
# set the appropriate bos/pad token ids
_UpperCamelCase : str = 2_048
_UpperCamelCase : str = 2_048
# set other default generation config params
_UpperCamelCase : Optional[Any] = int(30 * audio_encoder.config.frame_rate )
_UpperCamelCase : List[str] = True
_UpperCamelCase : int = 3.0
if pytorch_dump_folder is not None:
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
logger.info(F'''Saving model {checkpoint} to {pytorch_dump_folder}''' )
model.save_pretrained(lowercase_ )
processor.save_pretrained(lowercase_ )
if repo_id:
logger.info(F'''Pushing model {checkpoint} to {repo_id}''' )
model.push_to_hub(lowercase_ )
processor.push_to_hub(lowercase_ )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
lowerCamelCase__ = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
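# Example invocation (illustrative; the output path is a placeholder and the
# script filename is an assumption based on this module's contents):
#
#     python convert_musicgen_transformers.py \
#         --checkpoint small \
#         --pytorch_dump_folder ./musicgen-small-converted \
#         --device cpu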
| 51
| 0
|
import tensorflow as tf
from ...tf_utils import shape_list
class __lowerCAmelCase ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Union[str, Any] , _snake_case : Optional[int] , _snake_case : Union[str, Any] , _snake_case : Tuple , _snake_case : Optional[int] , _snake_case : Any=1 , _snake_case : Optional[Any]=False , **_snake_case : List[Any] ):
"""simple docstring"""
super().__init__(**_snake_case )
A__ = vocab_size
A__ = d_embed
A__ = d_proj
A__ = cutoffs + [vocab_size]
A__ = [0] + self.cutoffs
A__ = div_val
A__ = self.cutoffs[0]
A__ = len(self.cutoffs ) - 1
A__ = self.shortlist_size + self.n_clusters
A__ = keep_order
A__ = []
A__ = []
def _a ( self : Optional[int] , _snake_case : Any ):
"""simple docstring"""
if self.n_clusters > 0:
A__ = self.add_weight(
shape=(self.n_clusters, self.d_embed) , initializer='zeros' , trainable=_snake_case , name='cluster_weight' )
A__ = self.add_weight(
shape=(self.n_clusters,) , initializer='zeros' , trainable=_snake_case , name='cluster_bias' )
if self.div_val == 1:
for i in range(len(self.cutoffs ) ):
if self.d_proj != self.d_embed:
A__ = self.add_weight(
shape=(self.d_embed, self.d_proj) , initializer='zeros' , trainable=_snake_case , name=F'''out_projs_._{i}''' , )
self.out_projs.append(_snake_case )
else:
self.out_projs.append(_snake_case )
A__ = self.add_weight(
shape=(self.vocab_size, self.d_embed) , initializer='zeros' , trainable=_snake_case , name=F'''out_layers_._{i}_._weight''' , )
A__ = self.add_weight(
shape=(self.vocab_size,) , initializer='zeros' , trainable=_snake_case , name=F'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias) )
else:
for i in range(len(self.cutoffs ) ):
A__ , A__ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
A__ = self.d_embed // (self.div_val**i)
A__ = self.add_weight(
shape=(d_emb_i, self.d_proj) , initializer='zeros' , trainable=_snake_case , name=F'''out_projs_._{i}''' )
self.out_projs.append(_snake_case )
A__ = self.add_weight(
shape=(r_idx - l_idx, d_emb_i) , initializer='zeros' , trainable=_snake_case , name=F'''out_layers_._{i}_._weight''' , )
A__ = self.add_weight(
shape=(r_idx - l_idx,) , initializer='zeros' , trainable=_snake_case , name=F'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias) )
super().build(_snake_case )
@staticmethod
def _a ( _snake_case : List[Any] , _snake_case : str , _snake_case : Any , _snake_case : Union[str, Any]=None ):
"""simple docstring"""
A__ = x
if proj is not None:
A__ = tf.einsum('ibd,ed->ibe' , _snake_case , _snake_case )
return tf.einsum('ibd,nd->ibn' , _snake_case , _snake_case ) + b
@staticmethod
def _a ( _snake_case : Optional[int] , _snake_case : Optional[Any] ):
"""simple docstring"""
A__ = shape_list(_snake_case )
A__ = tf.range(lp_size[0] , dtype=target.dtype )
A__ = tf.stack([r, target] , 1 )
return tf.gather_nd(_snake_case , _snake_case )
def _a ( self : Optional[int] , _snake_case : Any , _snake_case : Optional[Any] , _snake_case : Any=True , _snake_case : Union[str, Any]=False ):
"""simple docstring"""
A__ = 0
if self.n_clusters == 0:
A__ = self._logit(_snake_case , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
if target is not None:
A__ = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=_snake_case , logits=_snake_case )
A__ = tf.nn.log_softmax(_snake_case , axis=-1 )
else:
A__ = shape_list(_snake_case )
A__ = []
A__ = tf.zeros(hidden_sizes[:2] )
for i in range(len(self.cutoffs ) ):
A__ , A__ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
A__ = (target >= l_idx) & (target < r_idx)
A__ = tf.where(_snake_case )
A__ = tf.boolean_mask(_snake_case , _snake_case ) - l_idx
if self.div_val == 1:
A__ = self.out_layers[0][0][l_idx:r_idx]
A__ = self.out_layers[0][1][l_idx:r_idx]
else:
A__ = self.out_layers[i][0]
A__ = self.out_layers[i][1]
if i == 0:
A__ = tf.concat([cur_W, self.cluster_weight] , 0 )
A__ = tf.concat([cur_b, self.cluster_bias] , 0 )
A__ = self._logit(_snake_case , _snake_case , _snake_case , self.out_projs[0] )
A__ = tf.nn.log_softmax(_snake_case )
out.append(head_logprob[..., : self.cutoffs[0]] )
if target is not None:
A__ = tf.boolean_mask(_snake_case , _snake_case )
A__ = self._gather_logprob(_snake_case , _snake_case )
else:
A__ = self._logit(_snake_case , _snake_case , _snake_case , self.out_projs[i] )
A__ = tf.nn.log_softmax(_snake_case )
A__ = self.cutoffs[0] + i - 1 # No probability for the head cluster
A__ = head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(_snake_case )
if target is not None:
A__ = tf.boolean_mask(_snake_case , _snake_case )
A__ = tf.boolean_mask(_snake_case , _snake_case )
A__ = self._gather_logprob(_snake_case , _snake_case )
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(_snake_case , -cur_logprob , shape_list(_snake_case ) )
A__ = tf.concat(_snake_case , axis=-1 )
if target is not None:
if return_mean:
A__ = tf.reduce_mean(_snake_case )
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(_snake_case )
# Log the loss as a metric (we could log arbitrary metrics,
        # including different metrics for training and inference).
self.add_metric(_snake_case , name=self.name , aggregation='mean' if return_mean else '' )
return out
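# A minimal shape-level usage sketch for an adaptive softmax of this kind
# (illustrative, not part of the original module). It assumes the upstream
# class `TFAdaptiveSoftmaxMask` from
# `transformers.models.transfo_xl.modeling_tf_transfo_xl_utilities`, of which
# the layer above appears to be a copy; the constructor arguments mirror the
# `__init__` signature above (vocab_size, d_embed, d_proj, cutoffs).
if __name__ == "__main__":
    from transformers.models.transfo_xl.modeling_tf_transfo_xl_utilities import TFAdaptiveSoftmaxMask

    softmax = TFAdaptiveSoftmaxMask(vocab_size=1_000, d_embed=32, d_proj=32, cutoffs=[100, 500])
    hidden = tf.random.normal((8, 4, 32))  # (seq_len, batch, d_proj)
    target = tf.random.uniform((8, 4), maxval=1_000, dtype=tf.int32)
    log_probs = softmax(hidden, target, return_mean=True)  # loss is attached via add_loss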
| 52
|
def A ( __UpperCamelCase ) -> bool:
    return __UpperCamelCase & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
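# Quick illustration: the least-significant bit of a binary integer encodes
# parity, so masking with 1 distinguishes even from odd.
#
#     >>> A(10)   # 0b1010 & 1 == 0
#     True
#     >>> A(7)    # 0b0111 & 1 == 1
#     False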
| 52
| 1
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
A__ : str = "Salesforce/blip-image-captioning-base"
A__ : Any = (
"This is a tool that generates a description of an image. It takes an input named `image` which should be the "
"image to caption, and returns a text that contains the description in English."
)
A__ : Optional[Any] = "image_captioner"
    A__ : Dict = AutoModelForVision2Seq
A__ : int = ["image"]
A__ : int = ["text"]
def __init__( self : Tuple , *_snake_case : Optional[int] , **_snake_case : str ):
"""simple docstring"""
requires_backends(self , ['vision'] )
super().__init__(*_snake_case , **_snake_case )
def _a ( self : List[Any] , _snake_case : "Image" ):
"""simple docstring"""
return self.pre_processor(images=_snake_case , return_tensors='pt' )
def _a ( self : Any , _snake_case : Optional[int] ):
"""simple docstring"""
return self.model.generate(**_snake_case )
def _a ( self : Dict , _snake_case : Union[str, Any] ):
"""simple docstring"""
return self.pre_processor.batch_decode(_snake_case , skip_special_tokens=_snake_case )[0].strip()
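# A minimal usage sketch (illustrative, not a documented API guarantee): the
# task string passed to `load_tool` and the path "cat.png" are assumptions;
# check the transformers agents documentation for the exact identifier.
if __name__ == "__main__":
    from PIL import Image
    from transformers import load_tool

    captioner = load_tool("image-captioning")  # assumed task id
    print(captioner(Image.open("cat.png")))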
| 52
|
from typing import Dict
from .base import GenericTensor, Pipeline
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def _a ( self : Any , _snake_case : str=None , _snake_case : Dict=None , _snake_case : Any=None , **_snake_case : str ):
"""simple docstring"""
if tokenize_kwargs is None:
A__ = {}
if truncation is not None:
if "truncation" in tokenize_kwargs:
raise ValueError(
'truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)' )
A__ = truncation
A__ = tokenize_kwargs
A__ = {}
if return_tensors is not None:
A__ = return_tensors
return preprocess_params, {}, postprocess_params
def _a ( self : Any , _snake_case : Dict , **_snake_case : Optional[Any] ):
"""simple docstring"""
A__ = self.framework
A__ = self.tokenizer(_snake_case , return_tensors=_snake_case , **_snake_case )
return model_inputs
def _a ( self : List[Any] , _snake_case : Dict ):
"""simple docstring"""
A__ = self.model(**_snake_case )
return model_outputs
def _a ( self : Optional[Any] , _snake_case : List[Any] , _snake_case : str=False ):
"""simple docstring"""
if return_tensors:
return model_outputs[0]
if self.framework == "pt":
return model_outputs[0].tolist()
elif self.framework == "tf":
return model_outputs[0].numpy().tolist()
def __call__( self : Dict , *_snake_case : int , **_snake_case : List[str] ):
"""simple docstring"""
return super().__call__(*_snake_case , **_snake_case )
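# A minimal usage sketch for the pipeline above (illustrative): the public
# `pipeline("feature-extraction")` factory wires this class up; the model name
# below is just a common choice, not something this module mandates.
if __name__ == "__main__":
    from transformers import pipeline

    extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
    features = extractor("Transformers is an NLP library.")
    print(len(features[0]), len(features[0][0]))  # num_tokens, hidden_size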
| 52
| 1
|
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def A ( __UpperCamelCase ) -> Any:
if is_torch_version('<' , '2.0.0' ) or not hasattr(__UpperCamelCase , '_dynamo' ):
return False
return isinstance(__UpperCamelCase , torch._dynamo.eval_frame.OptimizedModule )
def A ( __UpperCamelCase , __UpperCamelCase = True ) -> List[str]:
A__ = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
A__ = is_compiled_module(__UpperCamelCase )
if is_compiled:
A__ = model
A__ = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(__UpperCamelCase , __UpperCamelCase ):
A__ = model.module
if not keep_fpaa_wrapper:
A__ = getattr(__UpperCamelCase , 'forward' )
A__ = model.__dict__.pop('_original_forward' , __UpperCamelCase )
if original_forward is not None:
while hasattr(__UpperCamelCase , '__wrapped__' ):
A__ = forward.__wrapped__
if forward == original_forward:
break
A__ = forward
if getattr(__UpperCamelCase , '_converted_to_transformer_engine' , __UpperCamelCase ):
convert_model(__UpperCamelCase , to_transformer_engine=__UpperCamelCase )
if is_compiled:
A__ = model
A__ = compiled_model
return model
def A ( ) -> List[Any]:
PartialState().wait_for_everyone()
def A ( __UpperCamelCase , __UpperCamelCase ) -> Dict:
if PartialState().distributed_type == DistributedType.TPU:
xm.save(__UpperCamelCase , __UpperCamelCase )
elif PartialState().local_process_index == 0:
torch.save(__UpperCamelCase , __UpperCamelCase )
@contextmanager
def A ( **__UpperCamelCase ) -> List[str]:
for key, value in kwargs.items():
A__ = str(__UpperCamelCase )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def A ( __UpperCamelCase ) -> List[str]:
if not hasattr(__UpperCamelCase , '__qualname__' ) and not hasattr(__UpperCamelCase , '__name__' ):
A__ = getattr(__UpperCamelCase , '__class__' , __UpperCamelCase )
if hasattr(__UpperCamelCase , '__qualname__' ):
return obj.__qualname__
if hasattr(__UpperCamelCase , '__name__' ):
return obj.__name__
return str(__UpperCamelCase )
def A ( __UpperCamelCase , __UpperCamelCase ) -> List[Any]:
for key, value in source.items():
if isinstance(__UpperCamelCase , __UpperCamelCase ):
A__ = destination.setdefault(__UpperCamelCase , {} )
merge_dicts(__UpperCamelCase , __UpperCamelCase )
else:
A__ = value
return destination
def A ( __UpperCamelCase = None ) -> bool:
if port is None:
A__ = 29_500
with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
return s.connect_ex(('localhost', port) ) == 0
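# A minimal usage sketch for the environment-patching context manager defined
# above (illustrative). Upstream it is exported as
# `accelerate.utils.patch_environment`; keys are upper-cased on entry and
# deleted again on exit (this version does not restore a pre-existing value).
if __name__ == "__main__":
    from accelerate.utils import patch_environment

    with patch_environment(master_addr="127.0.0.1", master_port="29501"):
        print(os.environ["MASTER_ADDR"], os.environ["MASTER_PORT"])
    # MASTER_ADDR / MASTER_PORT are removed here, assuming they were not set before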
| 52
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__)
def A ( __UpperCamelCase , __UpperCamelCase ) -> List[Any]:
return (preds == labels).mean()
@dataclass
class __lowerCAmelCase :
"""simple docstring"""
A__ : str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
A__ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
A__ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
A__ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class __lowerCAmelCase :
"""simple docstring"""
A__ : str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} )
A__ : str = field(metadata={"help": "Should contain the data files for the task."} )
A__ : int = field(
default=1_28 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
A__ : bool = field(
default=UpperCAmelCase_ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def A ( ) -> Any:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
A__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
A__ , A__ , A__ = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
        'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , __UpperCamelCase )
# Set seed
set_seed(training_args.seed )
try:
A__ = processors[data_args.task_name]()
A__ = processor.get_labels()
A__ = len(__UpperCamelCase )
except KeyError:
raise ValueError('Task not found: %s' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
A__ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__UpperCamelCase , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
A__ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
A__ = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=__UpperCamelCase , cache_dir=model_args.cache_dir , )
# Get datasets
A__ = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=__UpperCamelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
A__ = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=__UpperCamelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def compute_metrics(__UpperCamelCase ) -> Dict:
A__ = np.argmax(p.predictions , axis=1 )
return {"acc": simple_accuracy(__UpperCamelCase , p.label_ids )}
# Data collator
    A__ = DataCollatorWithPadding(__UpperCamelCase , pad_to_multiple_of=8 ) if training_args.fp16 else None
# Initialize our Trainer
A__ = Trainer(
model=__UpperCamelCase , args=__UpperCamelCase , train_dataset=__UpperCamelCase , eval_dataset=__UpperCamelCase , compute_metrics=__UpperCamelCase , data_collator=__UpperCamelCase , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
A__ = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
A__ = trainer.evaluate()
A__ = os.path.join(training_args.output_dir , 'eval_results.txt' )
if trainer.is_world_master():
with open(__UpperCamelCase , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(' %s = %s' , __UpperCamelCase , __UpperCamelCase )
writer.write('%s = %s\n' % (key, value) )
results.update(__UpperCamelCase )
return results
def A ( __UpperCamelCase ) -> List[Any]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
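# Example invocation (illustrative; paths, the model name, and the script
# filename are placeholders/assumptions; "swag" must be a key of `processors`
# in utils_multiple_choice.py):
#
#     python run_multiple_choice.py \
#         --task_name swag \
#         --model_name_or_path bert-base-uncased \
#         --data_dir ./swag_data \
#         --output_dir ./swag_output \
#         --max_seq_length 80 \
#         --do_train --do_eval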
| 52
| 1
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
A__ : List[Any] = "naver-clova-ix/donut-base-finetuned-docvqa"
A__ : Optional[Any] = (
"This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
"should be the document containing the information, as well as a `question` that is the question about the "
"document. It returns a text that contains the answer to the question."
)
A__ : Tuple = "document_qa"
A__ : str = AutoProcessor
A__ : Union[str, Any] = VisionEncoderDecoderModel
A__ : int = ["image", "text"]
A__ : Any = ["text"]
def __init__( self : Optional[int] , *_snake_case : Tuple , **_snake_case : Tuple ):
"""simple docstring"""
if not is_vision_available():
raise ValueError('Pillow must be installed to use the DocumentQuestionAnsweringTool.' )
super().__init__(*_snake_case , **_snake_case )
def _a ( self : Union[str, Any] , _snake_case : "Image" , _snake_case : str ):
"""simple docstring"""
A__ = '<s_docvqa><s_question>{user_input}</s_question><s_answer>'
A__ = task_prompt.replace('{user_input}' , _snake_case )
A__ = self.pre_processor.tokenizer(
_snake_case , add_special_tokens=_snake_case , return_tensors='pt' ).input_ids
A__ = self.pre_processor(_snake_case , return_tensors='pt' ).pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def _a ( self : int , _snake_case : Optional[Any] ):
"""simple docstring"""
return self.model.generate(
inputs['pixel_values'].to(self.device ) , decoder_input_ids=inputs['decoder_input_ids'].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=_snake_case , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=_snake_case , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=_snake_case , ).sequences
def _a ( self : List[Any] , _snake_case : List[str] ):
"""simple docstring"""
A__ = self.pre_processor.batch_decode(_snake_case )[0]
A__ = sequence.replace(self.pre_processor.tokenizer.eos_token , '' )
A__ = sequence.replace(self.pre_processor.tokenizer.pad_token , '' )
A__ = re.sub(R'<.*?>' , '' , _snake_case , count=1 ).strip() # remove first task start token
        A__ = self.pre_processor.token2json(_snake_case )
return sequence["answer"]
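# A minimal usage sketch (illustrative, not a documented API guarantee): the
# task string passed to `load_tool` and the file "invoice.png" are assumptions;
# check the transformers agents documentation for the exact identifier.
if __name__ == "__main__":
    from transformers import load_tool

    doc_qa = load_tool("document-question-answering")  # assumed task id
    print(doc_qa(Image.open("invoice.png"), "What is the total amount?"))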
| 52
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
SCREAMING_SNAKE_CASE__ = {'''configuration_mra''': ['''MRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MraConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 52
| 1
|
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self : List[Any] , _snake_case : Any , _snake_case : Optional[int]=13 , _snake_case : Optional[Any]=64 , _snake_case : List[str]=2 , _snake_case : Any=3 , _snake_case : Union[str, Any]=True , _snake_case : Dict=True , _snake_case : int=32 , _snake_case : int=5 , _snake_case : Union[str, Any]=4 , _snake_case : int=37 , _snake_case : Tuple="gelu" , _snake_case : Optional[int]=0.1 , _snake_case : Dict=0.1 , _snake_case : List[str]=10 , _snake_case : Union[str, Any]=0.02 , _snake_case : Dict=[1, 16, 4, 4] , _snake_case : Dict=None , ):
"""simple docstring"""
A__ = parent
A__ = batch_size
A__ = image_size
A__ = patch_size
A__ = num_channels
A__ = is_training
A__ = use_labels
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = type_sequence_label_size
A__ = initializer_range
A__ = scope
A__ = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
A__ = (self.image_size // 32) ** 2
A__ = num_patches + 1
def _a ( self : Any ):
"""simple docstring"""
A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ = self.get_config()
return config, pixel_values, labels
def _a ( self : Tuple ):
"""simple docstring"""
A__ = {
'global_padding': 'same',
'layer_type': 'bottleneck',
'depths': [3, 4, 9],
'out_features': ['stage1', 'stage2', 'stage3'],
'embedding_dynamic_padding': True,
'hidden_sizes': [4, 8, 16, 32],
'num_groups': 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_snake_case , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=_snake_case , )
def _a ( self : int , _snake_case : Optional[int] , _snake_case : Union[str, Any] , _snake_case : Optional[int] ):
"""simple docstring"""
A__ = ViTHybridModel(config=_snake_case )
model.to(_snake_case )
model.eval()
A__ = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : List[str] , _snake_case : str , _snake_case : Union[str, Any] , _snake_case : Any ):
"""simple docstring"""
A__ = self.type_sequence_label_size
A__ = ViTHybridForImageClassification(_snake_case )
model.to(_snake_case )
model.eval()
A__ = model(_snake_case , labels=_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a ( self : Dict ):
"""simple docstring"""
A__ = self.prepare_config_and_inputs()
A__ , A__ , A__ = config_and_inputs
A__ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
A__ : Union[str, Any] = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
A__ : str = (
{"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
if is_torch_available()
else {}
)
A__ : Union[str, Any] = False
A__ : Any = False
A__ : Union[str, Any] = False
def _a ( self : Dict ):
"""simple docstring"""
A__ = ViTHybridModelTester(self )
A__ = ConfigTester(self , config_class=_snake_case , has_text_modality=_snake_case , hidden_size=37 )
def _a ( self : int ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def _a ( self : int ):
"""simple docstring"""
pass
def _a ( self : int ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(_snake_case )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_snake_case , nn.Linear ) )
def _a ( self : List[str] ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(_snake_case )
A__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ = [*signature.parameters.keys()]
A__ = ['pixel_values']
self.assertListEqual(arg_names[:1] , _snake_case )
def _a ( self : Any ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def _a ( self : str ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case )
def _a ( self : Any ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = _config_zero_init(_snake_case )
for model_class in self.all_model_classes:
A__ = model_class(config=_snake_case )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
A__ = [F'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
def _a ( self : int ):
"""simple docstring"""
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = ViTHybridModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def A ( ) -> Union[str, Any]:
A__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _a ( self : Tuple ):
"""simple docstring"""
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _a ( self : Optional[Any] ):
"""simple docstring"""
A__ = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
_snake_case )
A__ = self.default_image_processor
A__ = prepare_img()
A__ = image_processor(images=_snake_case , return_tensors='pt' ).to(_snake_case )
# forward pass
with torch.no_grad():
A__ = model(**_snake_case )
# verify the logits
A__ = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , _snake_case )
A__ = torch.tensor([-1.9090, -0.4993, -0.2389] ).to(_snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _snake_case , atol=1E-4 ) )
@slow
@require_accelerate
def _a ( self : List[Any] ):
"""simple docstring"""
A__ = ViTHybridImageProcessor.from_pretrained('google/vit-hybrid-base-bit-384' )
A__ = ViTHybridForImageClassification.from_pretrained('google/vit-hybrid-base-bit-384' , device_map='auto' )
A__ = prepare_img()
A__ = image_processor(images=_snake_case , return_tensors='pt' )
A__ = model(**_snake_case )
A__ = outputs.logits
# model predicts one of the 1000 ImageNet classes
A__ = logits.argmax(-1 ).item()
        self.assertEqual(model.config.id2label[predicted_class_idx] , 'tabby, tabby cat' )
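# A minimal standalone inference sketch for the model under test (illustrative,
# not part of the original tests). It reuses the checkpoint list and the COCO
# fixture image referenced by the tests above.
if __name__ == "__main__":
    demo_processor = ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0])
    demo_model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0])
    demo_image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    demo_inputs = demo_processor(images=demo_image, return_tensors="pt")
    with torch.no_grad():
        demo_logits = demo_model(**demo_inputs).logits
    print(demo_model.config.id2label[demo_logits.argmax(-1).item()])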
| 52
|
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = '''%20'''.join(argv[1:]) if len(argv) > 1 else quote(str(input('''Search: ''')))
print('''Googling.....''')
SCREAMING_SNAKE_CASE__ = f'https://www.google.com/search?q={query}&num=100'
SCREAMING_SNAKE_CASE__ = requests.get(
url,
headers={'''User-Agent''': str(UserAgent().random)},
)
try:
SCREAMING_SNAKE_CASE__ = (
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''yuRUbf'''})
.find('''a''')
.get('''href''')
)
except AttributeError:
SCREAMING_SNAKE_CASE__ = parse_qs(
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''kCrYT'''})
.find('''a''')
.get('''href''')
)['''url'''][0]
webbrowser.open(link)
| 52
| 1
|
import math
class SelfOrganizingMap:
    """A two-cluster self-organizing map (Kohonen network)."""
    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        """Return the index (0 or 1) of the weight vector closest to the sample."""
        da = 0.0
        db = 0.0
        for i in range(len(sample)):
            da += math.pow((sample[i] - weights[0][i]), 2)
            db += math.pow((sample[i] - weights[1][i]), 2)
        return 0 if da > db else 1
    def update(self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float) -> list[list[int | float]]:
        """Pull the winning weight vector j toward the sample with learning rate alpha."""
        for i in range(len(weights[j])):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights
def main() -> None:
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)
            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)
    # results
    print(f'''Clusters that the test sample belongs to : {winner}''')
    print(f'''Weights that have been trained : {weights}''')
# running the main() function
if __name__ == "__main__":
main()
| 52
|
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
A__ : Any = IFInpaintingPipeline
A__ : Dict = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
A__ : List[Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
A__ : Dict = PipelineTesterMixin.required_optional_params - {"latents"}
def _a ( self : Any ):
"""simple docstring"""
return self._get_dummy_components()
def _a ( self : Optional[int] , _snake_case : Any , _snake_case : str=0 ):
"""simple docstring"""
if str(_snake_case ).startswith('mps' ):
A__ = torch.manual_seed(_snake_case )
else:
A__ = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
A__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_snake_case ) ).to(_snake_case )
A__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_snake_case ) ).to(_snake_case )
A__ = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def _a ( self : Dict ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def _a ( self : int ):
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def _a ( self : Optional[int] ):
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1 )
def _a ( self : List[str] ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def _a ( self : Dict ):
"""simple docstring"""
self._test_save_load_local()
def _a ( self : Optional[int] ):
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
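# A minimal usage sketch for the pipeline under test (illustrative, not part of
# the original tests). The "DeepFloyd/IF-I-XL-v1.0" checkpoint is an assumption
# about which public weights to use; the random tensors stand in for a real
# image and mask.
if __name__ == "__main__":
    pipe = IFInpaintingPipeline.from_pretrained(
        "DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16
    )
    pipe.enable_model_cpu_offload()
    demo_image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch.float16)
    demo_mask = floats_tensor((1, 3, 64, 64), rng=random.Random(1)).to(torch.float16)
    result = pipe(
        prompt="A painting of a squirrel eating a burger",
        image=demo_image,
        mask_image=demo_mask,
        num_inference_steps=50,
    ).images[0]
    result.save("if_inpainting_sample.png")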
| 52
| 1
|
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def A ( __UpperCamelCase ) -> List[str]:
A__ = []
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''',
f'''stage{idx}.patch_embed.proj.weight''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''',
f'''stage{idx}.patch_embed.proj.bias''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''',
f'''stage{idx}.patch_embed.norm.weight''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''',
f'''stage{idx}.patch_embed.norm.bias''',
) )
return embed
def A ( __UpperCamelCase , __UpperCamelCase ) -> Tuple:
A__ = []
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj.bias''',
) )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', f'''stage{idx}.blocks.{cnt}.norm1.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', f'''stage{idx}.blocks.{cnt}.norm1.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', f'''stage{idx}.blocks.{cnt}.norm2.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', f'''stage{idx}.blocks.{cnt}.norm2.bias''') )
return attention_weights
def A ( __UpperCamelCase ) -> Any:
A__ = []
token.append((f'''cvt.encoder.stages.{idx}.cls_token''', 'stage2.cls_token') )
return token
def A ( ) -> List[str]:
A__ = []
head.append(('layernorm.weight', 'norm.weight') )
head.append(('layernorm.bias', 'norm.bias') )
head.append(('classifier.weight', 'head.weight') )
head.append(('classifier.bias', 'head.bias') )
return head
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Optional[int]:
A__ = 'imagenet-1k-id2label.json'
A__ = 1_000
A__ = 'huggingface/label-files'
A__ = num_labels
A__ = json.load(open(cached_download(hf_hub_url(__UpperCamelCase , __UpperCamelCase , repo_type='dataset' ) ) , 'r' ) )
    A__ = {int(k): v for k, v in idalabel.items()}
A__ = idalabel
A__ = {v: k for k, v in idalabel.items()}
    A__ = A__ = CvtConfig(num_labels=num_labels , id2label=idalabel , label2id=labelaid )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit('/' , 1 )[-1][4:6] == "13":
A__ = [1, 2, 10]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit('/' , 1 )[-1][4:6] == "21":
A__ = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
else:
A__ = [2, 2, 20]
A__ = [3, 12, 16]
A__ = [192, 768, 1_024]
A__ = CvtForImageClassification(__UpperCamelCase )
A__ = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
A__ = image_size
A__ = torch.load(__UpperCamelCase , map_location=torch.device('cpu' ) )
A__ = OrderedDict()
A__ = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
A__ = list_of_state_dict + cls_token(__UpperCamelCase )
A__ = list_of_state_dict + embeddings(__UpperCamelCase )
for cnt in range(config.depth[idx] ):
A__ = list_of_state_dict + attention(__UpperCamelCase , __UpperCamelCase )
A__ = list_of_state_dict + final()
for gg in list_of_state_dict:
print(__UpperCamelCase )
for i in range(len(__UpperCamelCase ) ):
A__ = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(__UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
image_processor.save_pretrained(__UpperCamelCase )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument(
'''--cvt_model''',
default='''cvt-w24''',
type=str,
help='''Name of the cvt model you\'d like to convert.''',
)
parser.add_argument(
'''--image_size''',
default=3_8_4,
type=int,
help='''Input Image Size''',
)
parser.add_argument(
'''--cvt_file_name''',
default=r'''cvtmodels\CvT-w24-384x384-IN-22k.pth''',
type=str,
help='''Input Image Size''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
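# Example invocation (illustrative; the weights file comes from the zoo link
# above, the paths are placeholders, and the script filename is an assumption):
#
#     python convert_cvt_original_pytorch_checkpoint_to_pytorch.py \
#         --cvt_model cvt-w24 \
#         --image_size 384 \
#         --cvt_file_name cvtmodels/CvT-w24-384x384-IN-22k.pth \
#         --pytorch_dump_folder_path ./cvt-w24-converted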
| 52
|
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
SCREAMING_SNAKE_CASE__ = get_logger(__name__)
SCREAMING_SNAKE_CASE__ = r'''
Args:
input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
search or log softmax for each vocabulary token when using beam search
kwargs (`Dict[str, Any]`, *optional*):
Additional logits processor specific kwargs.
Return:
`jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
'''
class __lowerCAmelCase :
"""simple docstring"""
@add_start_docstrings(_snake_case )
def __call__( self : Optional[int] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray ):
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class __lowerCAmelCase :
"""simple docstring"""
@add_start_docstrings(_snake_case )
def __call__( self : List[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray ):
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
@add_start_docstrings(_snake_case )
def __call__( self : Any , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int , **_snake_case : Optional[int] ):
"""simple docstring"""
for processor in self:
A__ = inspect.signature(processor.__call__ ).parameters
if len(_snake_case ) > 3:
if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
raise ValueError(
F'''Make sure that all the required parameters: {list(function_args.keys() )} for '''
F'''{processor.__class__} are passed to the logits processor.''' )
A__ = processor(_snake_case , _snake_case , _snake_case , **_snake_case )
else:
A__ = processor(_snake_case , _snake_case , _snake_case )
return scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Any , _snake_case : float ):
"""simple docstring"""
if not isinstance(_snake_case , _snake_case ) or not (temperature > 0):
raise ValueError(F'''`temperature` has to be a strictly positive float, but is {temperature}''' )
A__ = temperature
def __call__( self : str , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
"""simple docstring"""
A__ = scores / self.temperature
return scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Optional[Any] , _snake_case : float , _snake_case : float = -float('Inf' ) , _snake_case : int = 1 ):
"""simple docstring"""
if not isinstance(_snake_case , _snake_case ) or (top_p < 0 or top_p > 1.0):
raise ValueError(F'''`top_p` has to be a float > 0 and < 1, but is {top_p}''' )
if not isinstance(_snake_case , _snake_case ) or (min_tokens_to_keep < 1):
raise ValueError(F'''`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}''' )
A__ = top_p
A__ = filter_value
A__ = min_tokens_to_keep
def __call__( self : str , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
"""simple docstring"""
A__ , A__ = lax.top_k(_snake_case , scores.shape[-1] )
A__ = jnp.full_like(_snake_case , self.filter_value )
A__ = jax.nn.softmax(_snake_case , axis=-1 ).cumsum(axis=-1 )
A__ = cumulative_probs < self.top_p
# include the token that is higher than top_p as well
A__ = jnp.roll(_snake_case , 1 )
score_mask |= score_mask.at[:, 0].set(_snake_case )
# min tokens to keep
A__ = score_mask.at[:, : self.min_tokens_to_keep].set(_snake_case )
A__ = jnp.where(_snake_case , _snake_case , _snake_case )
A__ = jax.lax.sort_key_val(_snake_case , _snake_case )[-1]
return next_scores
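# Illustrative aside (not part of the original module): a minimal NumPy sketch
# of the nucleus (top-p) idea implemented above with JAX primitives. Tokens are
# kept best-first until their cumulative probability reaches `top_p`; the rest
# are masked to `filter_value`. The `min_tokens_to_keep` safeguard is omitted.
import numpy as np


def _nucleus_filter_np(scores, top_p, filter_value=-np.inf):
    order = np.argsort(scores)[::-1]  # token indices, best score first
    shifted = scores[order] - scores[order].max()  # numerically stable softmax
    probs = np.exp(shifted) / np.exp(shifted).sum()
    # exclusive cumulative sum: also keeps the token that crosses top_p
    keep = (np.cumsum(probs) - probs) < top_p
    out = np.full_like(scores, filter_value)
    out[order[keep]] = scores[order[keep]]
    return out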
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , _snake_case : int , _snake_case : float = -float('Inf' ) , _snake_case : int = 1 ):
"""simple docstring"""
if not isinstance(_snake_case , _snake_case ) or top_k <= 0:
raise ValueError(F'''`top_k` has to be a strictly positive integer, but is {top_k}''' )
A__ = max(_snake_case , _snake_case )
A__ = filter_value
def __call__( self : Optional[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
"""simple docstring"""
A__ , A__ = scores.shape
A__ = jnp.full(batch_size * vocab_size , self.filter_value )
A__ = min(self.top_k , scores.shape[-1] ) # Safety check
A__ , A__ = lax.top_k(_snake_case , _snake_case )
A__ = jnp.broadcast_to((jnp.arange(_snake_case ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
A__ = topk_scores.flatten()
A__ = topk_indices.flatten() + shift
A__ = next_scores_flat.at[topk_indices_flat].set(_snake_case )
A__ = next_scores_flat.reshape(_snake_case , _snake_case )
return next_scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Any , _snake_case : int ):
"""simple docstring"""
A__ = bos_token_id
def __call__( self : Optional[int] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
"""simple docstring"""
A__ = jnp.full(scores.shape , -float('inf' ) )
A__ = 1 - jnp.bool_(cur_len - 1 )
A__ = jnp.where(_snake_case , new_scores.at[:, self.bos_token_id].set(0 ) , _snake_case )
return scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Any , _snake_case : int , _snake_case : int ):
"""simple docstring"""
A__ = max_length
A__ = eos_token_id
def __call__( self : List[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
"""simple docstring"""
A__ = jnp.full(scores.shape , -float('inf' ) )
A__ = 1 - jnp.bool_(cur_len - self.max_length + 1 )
A__ = jnp.where(_snake_case , new_scores.at[:, self.eos_token_id].set(0 ) , _snake_case )
return scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Dict , _snake_case : int , _snake_case : int ):
"""simple docstring"""
if not isinstance(_snake_case , _snake_case ) or min_length < 0:
raise ValueError(F'''`min_length` has to be a positive integer, but is {min_length}''' )
if not isinstance(_snake_case , _snake_case ) or eos_token_id < 0:
raise ValueError(F'''`eos_token_id` has to be a positive integer, but is {eos_token_id}''' )
A__ = min_length
A__ = eos_token_id
def __call__( self : int , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
"""simple docstring"""
A__ = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
A__ = jnp.where(_snake_case , scores.at[:, self.eos_token_id].set(-float('inf' ) ) , _snake_case )
return scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : int , _snake_case : Tuple , _snake_case : Union[str, Any] ):
"""simple docstring"""
A__ = list(_snake_case )
A__ = begin_index
def __call__( self : Union[str, Any] , _snake_case : Optional[Any] , _snake_case : str , _snake_case : int ):
"""simple docstring"""
A__ = 1 - jnp.bool_(cur_len - self.begin_index )
A__ = jnp.where(_snake_case , scores.at[:, self.begin_suppress_tokens].set(-float('inf' ) ) , _snake_case )
return scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : int , _snake_case : list ):
"""simple docstring"""
A__ = list(_snake_case )
def __call__( self : List[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
"""simple docstring"""
A__ = scores.at[..., self.suppress_tokens].set(-float('inf' ) )
return scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : List[str] , _snake_case : Optional[Any] ):
"""simple docstring"""
A__ = dict(_snake_case )
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
        A__ = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.int32 ) * -1
for index, token in force_token_map.items():
if token is not None:
A__ = force_token_array.at[index].set(_snake_case )
        A__ = jnp.int32(_snake_case )
def __call__( self : List[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
"""simple docstring"""
def _force_token(_snake_case : Dict ):
A__ = scores.shape[0]
A__ = self.force_token_array[generation_idx]
A__ = jnp.ones_like(_snake_case , dtype=scores.dtype ) * -float('inf' )
A__ = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
A__ = lax.dynamic_update_slice(_snake_case , _snake_case , (0, current_token) )
return new_scores
A__ = lax.cond(
cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
self.force_token_array[cur_len] >= 0 , lambda: _force_token(_snake_case ) , lambda: scores , ) , )
return scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , _snake_case : Union[str, Any] , _snake_case : Dict , _snake_case : List[Any] ):
"""simple docstring"""
A__ = generate_config.eos_token_id
A__ = generate_config.no_timestamps_token_id
A__ = generate_config.no_timestamps_token_id + 1
A__ = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(_snake_case , 'max_initial_timestamp_index' ):
A__ = generate_config.max_initial_timestamp_index
else:
A__ = model_config.vocab_size
if self.max_initial_timestamp_index is None:
A__ = model_config.vocab_size
def __call__( self : Tuple , _snake_case : List[Any] , _snake_case : Dict , _snake_case : Dict ):
"""simple docstring"""
A__ = scores.at[:, self.no_timestamps_token_id].set(-float('inf' ) )
def handle_pairs(_snake_case : Dict , _snake_case : str ):
A__ = jnp.where((cur_len - self.begin_index) >= 1 , _snake_case , _snake_case )
A__ = jnp.where(
input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , _snake_case , )
A__ = jnp.where((cur_len - self.begin_index) < 2 , _snake_case , _snake_case )
A__ = jnp.where(
input_ids_k[cur_len - 2] >= self.timestamp_begin , _snake_case , _snake_case , )
return jnp.where(
_snake_case , jnp.where(
penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float('inf' ) ) , scores_k.at[: self.eos_token_id].set(-float('inf' ) ) , ) , _snake_case , )
A__ = jax.vmap(_snake_case )(_snake_case , _snake_case )
A__ = jnp.where(cur_len == self.begin_index , _snake_case , _snake_case )
A__ = jnp.where(
self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , _snake_case , )
A__ = self.timestamp_begin + self.max_initial_timestamp_index
A__ = jnp.where(
_snake_case , scores.at[:, last_allowed + 1 :].set(-float('inf' ) ) , _snake_case , )
# if sum of probability over timestamps is above any other token, sample timestamp
A__ = jax.nn.log_softmax(_snake_case , axis=-1 )
def handle_cumulative_probs(_snake_case : List[Any] , _snake_case : Union[str, Any] ):
A__ = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
A__ = jnp.max(logprobs_k[: self.timestamp_begin] )
return jnp.where(
timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float('inf' ) ) , _snake_case , )
A__ = jax.vmap(_snake_case )(_snake_case , _snake_case )
return scores
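# Usage sketch for the warpers above, via their public transformers names
# (an assumption: the mangled class names in this dump shadow one another
# and cannot be referenced directly):
if __name__ == "__main__":
    import jax.numpy as jnp
    from transformers import (
        FlaxLogitsProcessorList,
        FlaxTemperatureLogitsWarper,
        FlaxTopKLogitsWarper,
    )

    warpers = FlaxLogitsProcessorList(
        [FlaxTemperatureLogitsWarper(0.7), FlaxTopKLogitsWarper(top_k=2)]
    )
    dummy_ids = jnp.zeros((1, 4), dtype=jnp.int32)  # (batch_size, seq_len)
    dummy_scores = jnp.array([[1.0, 3.0, 2.0, 0.5]])  # (batch_size, vocab_size)
    print(warpers(dummy_ids, dummy_scores, cur_len=4))  # only the top 2 survive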
| 52
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
SCREAMING_SNAKE_CASE__ = {
'''configuration_efficientformer''': [
'''EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EfficientFormerConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ['''EfficientFormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
'''EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EfficientFormerForImageClassification''',
'''EfficientFormerForImageClassificationWithTeacher''',
'''EfficientFormerModel''',
'''EfficientFormerPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
'''TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFEfficientFormerForImageClassification''',
'''TFEfficientFormerForImageClassificationWithTeacher''',
'''TFEfficientFormerModel''',
'''TFEfficientFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 52
|
import argparse
import struct
import unittest
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self : List[str] , _snake_case : bytes ):
"""simple docstring"""
A__ = data
# Initialize hash values
A__ = [
0x6A09E667,
0xBB67AE85,
0x3C6EF372,
0xA54FF53A,
0x510E527F,
0x9B05688C,
0x1F83D9AB,
0x5BE0CD19,
]
# Initialize round constants
A__ = [
0x428A2F98,
0x71374491,
0xB5C0FBCF,
0xE9B5DBA5,
0x3956C25B,
0x59F111F1,
0x923F82A4,
0xAB1C5ED5,
0xD807AA98,
0x12835B01,
0x243185BE,
0x550C7DC3,
0x72BE5D74,
0x80DEB1FE,
0x9BDC06A7,
0xC19BF174,
0xE49B69C1,
0xEFBE4786,
0x0FC19DC6,
0x240CA1CC,
0x2DE92C6F,
0x4A7484AA,
0x5CB0A9DC,
0x76F988DA,
0x983E5152,
0xA831C66D,
0xB00327C8,
0xBF597FC7,
0xC6E00BF3,
0xD5A79147,
0x06CA6351,
0x14292967,
0x27B70A85,
0x2E1B2138,
0x4D2C6DFC,
0x53380D13,
0x650A7354,
0x766A0ABB,
0x81C2C92E,
0x92722C85,
0xA2BFE8A1,
0xA81A664B,
0xC24B8B70,
0xC76C51A3,
0xD192E819,
0xD6990624,
0xF40E3585,
0x106AA070,
0x19A4C116,
0x1E376C08,
0x2748774C,
0x34B0BCB5,
0x391C0CB3,
0x4ED8AA4A,
0x5B9CCA4F,
0x682E6FF3,
0x748F82EE,
0x78A5636F,
0x84C87814,
0x8CC70208,
0x90BEFFFA,
0xA4506CEB,
0xBEF9A3F7,
0xC67178F2,
]
A__ = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def _a ( _snake_case : bytes ):
"""simple docstring"""
A__ = B'\x80' + (B'\x00' * (63 - (len(_snake_case ) + 8) % 64))
A__ = struct.pack('>Q' , (len(_snake_case ) * 8) )
return data + padding + big_endian_integer
def _a ( self : Optional[int] ):
"""simple docstring"""
A__ = [
self.preprocessed_data[x : x + 64]
for x in range(0 , len(self.preprocessed_data ) , 64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
A__ = list(struct.unpack('>16L' , _snake_case ) )
            # extend the message schedule with 48 zero-initialised words
words += [0] * 48
A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ = self.hashes
for index in range(0 , 64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
A__ = (
self.ror(words[index - 15] , 7 )
^ self.ror(words[index - 15] , 18 )
^ (words[index - 15] >> 3)
)
A__ = (
self.ror(words[index - 2] , 17 )
^ self.ror(words[index - 2] , 19 )
^ (words[index - 2] >> 10)
)
A__ = (
words[index - 16] + sa + words[index - 7] + sa
) % 0x100000000
# Compression
A__ = self.ror(_snake_case , 6 ) ^ self.ror(_snake_case , 11 ) ^ self.ror(_snake_case , 25 )
A__ = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
A__ = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0x100000000
A__ = self.ror(_snake_case , 2 ) ^ self.ror(_snake_case , 13 ) ^ self.ror(_snake_case , 22 )
A__ = (a & b) ^ (a & c) ^ (b & c)
A__ = (sa + maj) % 0x100000000
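                # Rotate the working variables: e absorbs d + temp1 and a absorbs temp1 + temp2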
A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ = (
g,
f,
e,
((d + tempa) % 0x100000000),
c,
b,
a,
((tempa + tempa) % 0x100000000),
)
A__ = [a, b, c, d, e, f, g, h]
# Modify final values
A__ = [
((element + mutated_hash_values[index]) % 0x100000000)
for index, element in enumerate(self.hashes )
]
A__ = ''.join([hex(_snake_case )[2:].zfill(8 ) for value in self.hashes] )
def _a ( self : Dict , _snake_case : int , _snake_case : int ):
"""simple docstring"""
return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _a ( self : str ):
"""simple docstring"""
import hashlib
A__ = bytes('Test String' , 'utf-8' )
        self.assertEqual(SHAaaa(_snake_case ).hash , hashlib.sha256(_snake_case ).hexdigest() )
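# Standalone sanity sketch against the standard library, independent of the
# class above: two well-known SHA-256 test vectors.
def _sha256_known_vectors():
    import hashlib

    assert (
        hashlib.sha256(b"").hexdigest()
        == "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
    )
    assert (
        hashlib.sha256(b"abc").hexdigest()
        == "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad"
    )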
def A ( ) -> None:
import doctest
doctest.testmod()
A__ = argparse.ArgumentParser()
parser.add_argument(
'-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
parser.add_argument(
'-f' , '--file' , dest='input_file' , help='Hash contents of a file' )
A__ = parser.parse_args()
A__ = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , 'rb' ) as f:
A__ = f.read()
else:
A__ = bytes(__UpperCamelCase , 'utf-8' )
print(SHAaaa(__UpperCamelCase ).hash )
if __name__ == "__main__":
main()
| 52
| 1
|
from __future__ import annotations
def A ( __UpperCamelCase ) -> int:
if not nums:
return 0
A__ = nums[0]
A__ = 0
for num in nums[1:]:
A__ , A__ = (
max_excluding + num,
max(__UpperCamelCase , __UpperCamelCase ),
)
return max(__UpperCamelCase , __UpperCamelCase )
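# A cleanly-named sketch of the same recurrence (the mangled names above
# collide): track the best sum including vs. excluding the current element.
def max_non_adjacent_sum(nums: list[int]) -> int:
    if not nums:
        return 0
    including, excluding = nums[0], 0
    for num in nums[1:]:
        including, excluding = excluding + num, max(including, excluding)
    return max(including, excluding)


# e.g. [1, 2, 4, 7]: picking 2 and 7 (non-adjacent) gives the optimum 9
assert max_non_adjacent_sum([1, 2, 4, 7]) == 9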
if __name__ == "__main__":
import doctest
doctest.testmod()
| 52
|
import math
import random
def A ( __UpperCamelCase , __UpperCamelCase = False ) -> float:
if deriv:
return value * (1 - value)
return 1 / (1 + math.exp(-value ))
# Initial Value
SCREAMING_SNAKE_CASE__ = 0.02
def A ( __UpperCamelCase , __UpperCamelCase ) -> float:
A__ = float(2 * (random.randint(1 , 100 )) - 1 )
for _ in range(__UpperCamelCase ):
# Forward propagation
A__ = sigmoid_function(INITIAL_VALUE * weight )
# How much did we miss?
A__ = (expected / 100) - layer_a
# Error delta
A__ = layer_1_error * sigmoid_function(__UpperCamelCase , __UpperCamelCase )
# Update weight
weight += INITIAL_VALUE * layer_1_delta
return layer_a * 100
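# Aside: the deriv=True branch of sigmoid_function expects the already
# activated value s = sigmoid(x) and returns s * (1 - s), which equals
# sigmoid'(x). A quick numerical check of that identity at x = 0.5:
def _check_sigmoid_derivative(x: float = 0.5, h: float = 1e-6) -> None:
    s = 1 / (1 + math.exp(-x))
    analytic = s * (1 - s)
    numeric = (1 / (1 + math.exp(-(x + h))) - 1 / (1 + math.exp(-(x - h)))) / (2 * h)
    assert abs(analytic - numeric) < 1e-6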
if __name__ == "__main__":
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE__ = int(input('''Expected value: '''))
SCREAMING_SNAKE_CASE__ = int(input('''Number of propagations: '''))
print(forward_propagation(expected, number_propagations))
| 52
| 1
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def A ( __UpperCamelCase ) -> int:
A__ = 384
if "tiny" in model_name:
A__ = [3, 3, 9, 3]
A__ = [96, 192, 384, 768]
if "small" in model_name:
A__ = [3, 3, 27, 3]
A__ = [96, 192, 384, 768]
if "base" in model_name:
A__ = [3, 3, 27, 3]
A__ = [128, 256, 512, 1_024]
A__ = 512
if "large" in model_name:
A__ = [3, 3, 27, 3]
A__ = [192, 384, 768, 1_536]
A__ = 768
if "xlarge" in model_name:
A__ = [3, 3, 27, 3]
A__ = [256, 512, 1_024, 2_048]
A__ = 1_024
# set label information
A__ = 150
A__ = 'huggingface/label-files'
A__ = 'ade20k-id2label.json'
A__ = json.load(open(hf_hub_download(__UpperCamelCase , __UpperCamelCase , repo_type='dataset' ) , 'r' ) )
    A__ = {int(k): v for k, v in idalabel.items()}
A__ = {v: k for k, v in idalabel.items()}
A__ = ConvNextConfig(
depths=__UpperCamelCase , hidden_sizes=__UpperCamelCase , out_features=['stage1', 'stage2', 'stage3', 'stage4'] )
A__ = UperNetConfig(
backbone_config=__UpperCamelCase , auxiliary_in_channels=__UpperCamelCase , num_labels=__UpperCamelCase , idalabel=__UpperCamelCase , labelaid=__UpperCamelCase , )
return config
def A ( __UpperCamelCase ) -> Union[str, Any]:
A__ = []
# fmt: off
# stem
rename_keys.append(('backbone.downsample_layers.0.0.weight', 'backbone.embeddings.patch_embeddings.weight') )
rename_keys.append(('backbone.downsample_layers.0.0.bias', 'backbone.embeddings.patch_embeddings.bias') )
rename_keys.append(('backbone.downsample_layers.0.1.weight', 'backbone.embeddings.layernorm.weight') )
rename_keys.append(('backbone.downsample_layers.0.1.bias', 'backbone.embeddings.layernorm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.{j}.gamma''', f'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') )
if i > 0:
rename_keys.append((f'''backbone.downsample_layers.{i}.0.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.0.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.1.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.1.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
('decode_head.conv_seg.weight', 'decode_head.classifier.weight'),
('decode_head.conv_seg.bias', 'decode_head.classifier.bias'),
('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'),
('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'),
] )
# fmt: on
return rename_keys
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Optional[Any]:
A__ = dct.pop(__UpperCamelCase )
A__ = val
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> List[str]:
A__ = {
'upernet-convnext-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth',
'upernet-convnext-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth',
'upernet-convnext-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth',
'upernet-convnext-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth',
'upernet-convnext-xlarge': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth',
}
A__ = model_name_to_url[model_name]
A__ = torch.hub.load_state_dict_from_url(__UpperCamelCase , map_location='cpu' )['state_dict']
A__ = get_upernet_config(__UpperCamelCase )
A__ = UperNetForSemanticSegmentation(__UpperCamelCase )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
A__ = state_dict.pop(__UpperCamelCase )
if "bn" in key:
A__ = key.replace('bn' , 'batch_norm' )
A__ = val
# rename keys
A__ = create_rename_keys(__UpperCamelCase )
for src, dest in rename_keys:
rename_key(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
model.load_state_dict(__UpperCamelCase )
# verify on image
A__ = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'
A__ = Image.open(requests.get(__UpperCamelCase , stream=__UpperCamelCase ).raw ).convert('RGB' )
A__ = SegformerImageProcessor()
A__ = processor(__UpperCamelCase , return_tensors='pt' ).pixel_values
with torch.no_grad():
A__ = model(__UpperCamelCase )
if model_name == "upernet-convnext-tiny":
A__ = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] )
elif model_name == "upernet-convnext-small":
A__ = torch.tensor(
[[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] )
elif model_name == "upernet-convnext-base":
A__ = torch.tensor(
[[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] )
elif model_name == "upernet-convnext-large":
A__ = torch.tensor(
[[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] )
elif model_name == "upernet-convnext-xlarge":
A__ = torch.tensor(
[[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] )
print('Logits:' , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , __UpperCamelCase , atol=1E-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__UpperCamelCase )
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(__UpperCamelCase )
if push_to_hub:
print(f'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(f'''openmmlab/{model_name}''' )
processor.push_to_hub(f'''openmmlab/{model_name}''' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''upernet-convnext-tiny''',
type=str,
choices=[f'upernet-convnext-{size}' for size in ['''tiny''', '''small''', '''base''', '''large''', '''xlarge''']],
help='''Name of the ConvNext UperNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
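# Example invocation (hypothetical script filename; the checkpoint is
# downloaded from the OpenMMLab URL table above):
#   python convert_upernet_convnext_to_pytorch.py \
#       --model_name upernet-convnext-tiny \
#       --pytorch_dump_folder_path ./upernet-convnext-tiny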
| 52
|
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def _a ( self : int ):
"""simple docstring"""
        A__ = FlaxMT5ForConditionalGeneration.from_pretrained('google/mt5-small' )
A__ = AutoTokenizer.from_pretrained('google/mt5-small' )
A__ = tokenizer('Hello there' , return_tensors='np' ).input_ids
A__ = tokenizer('Hi I am' , return_tensors='np' ).input_ids
A__ = shift_tokens_right(_snake_case , model.config.pad_token_id , model.config.decoder_start_token_id )
A__ = model(_snake_case , decoder_input_ids=_snake_case ).logits
A__ = optax.softmax_cross_entropy(_snake_case , onehot(_snake_case , logits.shape[-1] ) ).mean()
A__ = -(labels.shape[-1] * loss.item())
A__ = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
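# A minimal NumPy sketch (an assumption: it mirrors the Flax T5 helper used
# above) of what shift_tokens_right does: prepend the decoder start token,
# drop the last position, and replace any -100 loss-masking markers with pad.
import numpy as np


def shift_tokens_right_np(input_ids, pad_token_id, decoder_start_token_id):
    shifted = np.zeros_like(input_ids)
    shifted[:, 1:] = input_ids[:, :-1]
    shifted[:, 0] = decoder_start_token_id
    return np.where(shifted == -100, pad_token_id, shifted)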
| 52
| 1
|
def A ( __UpperCamelCase ) -> Dict:
A__ = []
A__ = []
A__ = {
'^': 3,
'*': 2,
'/': 2,
'%': 2,
'+': 1,
'-': 1,
} # Priority of each operator
A__ = len(__UpperCamelCase ) if (len(__UpperCamelCase ) > 7) else 7
# Print table header for output
print(
'Symbol'.center(8 ) , 'Stack'.center(__UpperCamelCase ) , 'Postfix'.center(__UpperCamelCase ) , sep=' | ' , )
print('-' * (print_width * 3 + 7) )
for x in infix:
if x.isalpha() or x.isdigit():
post_fix.append(__UpperCamelCase ) # if x is Alphabet / Digit, add it to Postfix
elif x == "(":
stack.append(__UpperCamelCase ) # if x is "(" push to Stack
elif x == ")": # if x is ")" pop stack until "(" is encountered
while stack[-1] != "(":
post_fix.append(stack.pop() ) # Pop stack & add the content to Postfix
stack.pop()
else:
if len(__UpperCamelCase ) == 0:
stack.append(__UpperCamelCase ) # If stack is empty, push x to stack
else: # while priority of x is not > priority of element in the stack
while len(__UpperCamelCase ) > 0 and priority[x] <= priority[stack[-1]]:
post_fix.append(stack.pop() ) # pop stack & add to Postfix
stack.append(__UpperCamelCase ) # push x to stack
print(
x.center(8 ) , (''.join(__UpperCamelCase )).ljust(__UpperCamelCase ) , (''.join(__UpperCamelCase )).ljust(__UpperCamelCase ) , sep=' | ' , ) # Output in tabular format
while len(__UpperCamelCase ) > 0: # while stack is not empty
post_fix.append(stack.pop() ) # pop stack & add to Postfix
print(
' '.center(8 ) , (''.join(__UpperCamelCase )).ljust(__UpperCamelCase ) , (''.join(__UpperCamelCase )).ljust(__UpperCamelCase ) , sep=' | ' , ) # Output in tabular format
return "".join(__UpperCamelCase ) # return Postfix as str
def A ( __UpperCamelCase ) -> Optional[Any]:
A__ = list(infix[::-1] ) # reverse the infix equation
for i in range(len(__UpperCamelCase ) ):
if infix[i] == "(":
A__ = ')' # change "(" to ")"
elif infix[i] == ")":
A__ = '(' # change ")" to "("
return (infix_2_postfix(''.join(__UpperCamelCase ) ))[
::-1
] # call infix_2_postfix on Infix, return reverse of Postfix
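# A compact, cleanly-named sketch of the same shunting-yard conversion
# (the mangled function names above shadow each other), minus the trace table:
def to_postfix(infix: str) -> str:
    priority = {"^": 3, "*": 2, "/": 2, "%": 2, "+": 1, "-": 1}
    stack, out = [], []
    for x in infix:
        if x.isalnum():
            out.append(x)
        elif x == "(":
            stack.append(x)
        elif x == ")":
            while stack[-1] != "(":
                out.append(stack.pop())
            stack.pop()
        else:
            while stack and stack[-1] != "(" and priority[x] <= priority[stack[-1]]:
                out.append(stack.pop())
            stack.append(x)
    while stack:
        out.append(stack.pop())
    return "".join(out)


assert to_postfix("a+b*(c^d-e)") == "abcd^e-*+"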
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = input('''\nEnter an Infix Equation = ''') # Input an Infix equation
SCREAMING_SNAKE_CASE__ = ''''''.join(Infix.split()) # Remove spaces from the input
print('''\n\t''', Infix, '''(Infix) -> ''', infix_2_prefix(Infix), '''(Prefix)''')
| 52
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/config.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/config.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/config.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/config.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json''',
'''roberta-large-openai-detector''': '''https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json''',
}
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
A__ : List[str] = "roberta"
def __init__( self : List[str] , _snake_case : Union[str, Any]=5_02_65 , _snake_case : List[Any]=7_68 , _snake_case : List[str]=12 , _snake_case : List[str]=12 , _snake_case : Any=30_72 , _snake_case : Union[str, Any]="gelu" , _snake_case : int=0.1 , _snake_case : Union[str, Any]=0.1 , _snake_case : Tuple=5_12 , _snake_case : Union[str, Any]=2 , _snake_case : Any=0.02 , _snake_case : Any=1E-12 , _snake_case : List[Any]=1 , _snake_case : int=0 , _snake_case : Any=2 , _snake_case : Optional[Any]="absolute" , _snake_case : int=True , _snake_case : Any=None , **_snake_case : Any , ):
"""simple docstring"""
super().__init__(pad_token_id=_snake_case , bos_token_id=_snake_case , eos_token_id=_snake_case , **_snake_case )
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = hidden_act
A__ = intermediate_size
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = type_vocab_size
A__ = initializer_range
A__ = layer_norm_eps
A__ = position_embedding_type
A__ = use_cache
A__ = classifier_dropout
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
@property
def _a ( self : Dict ):
"""simple docstring"""
if self.task == "multiple-choice":
A__ = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
A__ = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
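# Usage sketch (assumes the public transformers and torch packages; the class
# above corresponds to RobertaConfig):
if __name__ == "__main__":
    from transformers import RobertaConfig, RobertaModel

    config = RobertaConfig(num_hidden_layers=6)  # override the 12-layer default
    model = RobertaModel(config)  # randomly initialised weights
    print(model.config.num_hidden_layers)  # -> 6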
| 52
| 1
|
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __get__( self : str , _snake_case : List[str] , _snake_case : List[Any]=None ):
"""simple docstring"""
if obj is None:
return self
if self.fget is None:
raise AttributeError('unreadable attribute' )
A__ = '__cached_' + self.fget.__name__
A__ = getattr(_snake_case , _snake_case , _snake_case )
if cached is None:
A__ = self.fget(_snake_case )
setattr(_snake_case , _snake_case , _snake_case )
return cached
def A ( __UpperCamelCase ) -> Optional[Any]:
A__ = val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(f'''invalid truth value {val!r}''' )
def A ( __UpperCamelCase ) -> Any:
if is_torch_fx_proxy(__UpperCamelCase ):
return True
if is_torch_available():
import torch
if isinstance(__UpperCamelCase , torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(__UpperCamelCase , tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(__UpperCamelCase , (jnp.ndarray, Tracer) ):
return True
return isinstance(__UpperCamelCase , np.ndarray )
def A ( __UpperCamelCase ) -> Union[str, Any]:
return isinstance(__UpperCamelCase , np.ndarray )
def A ( __UpperCamelCase ) -> List[Any]:
return _is_numpy(__UpperCamelCase )
def A ( __UpperCamelCase ) -> str:
import torch
return isinstance(__UpperCamelCase , torch.Tensor )
def A ( __UpperCamelCase ) -> Any:
return False if not is_torch_available() else _is_torch(__UpperCamelCase )
def A ( __UpperCamelCase ) -> str:
import torch
return isinstance(__UpperCamelCase , torch.device )
def A ( __UpperCamelCase ) -> List[Any]:
return False if not is_torch_available() else _is_torch_device(__UpperCamelCase )
def A ( __UpperCamelCase ) -> List[Any]:
import torch
if isinstance(__UpperCamelCase , __UpperCamelCase ):
if hasattr(__UpperCamelCase , __UpperCamelCase ):
A__ = getattr(__UpperCamelCase , __UpperCamelCase )
else:
return False
return isinstance(__UpperCamelCase , torch.dtype )
def A ( __UpperCamelCase ) -> List[Any]:
return False if not is_torch_available() else _is_torch_dtype(__UpperCamelCase )
def A ( __UpperCamelCase ) -> Any:
import tensorflow as tf
return isinstance(__UpperCamelCase , tf.Tensor )
def A ( __UpperCamelCase ) -> List[Any]:
return False if not is_tf_available() else _is_tensorflow(__UpperCamelCase )
def A ( __UpperCamelCase ) -> Any:
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(__UpperCamelCase , 'is_symbolic_tensor' ):
return tf.is_symbolic_tensor(__UpperCamelCase )
return type(__UpperCamelCase ) == tf.Tensor
def A ( __UpperCamelCase ) -> Any:
return False if not is_tf_available() else _is_tf_symbolic_tensor(__UpperCamelCase )
def A ( __UpperCamelCase ) -> Optional[Any]:
import jax.numpy as jnp # noqa: F811
return isinstance(__UpperCamelCase , jnp.ndarray )
def A ( __UpperCamelCase ) -> str:
return False if not is_flax_available() else _is_jax(__UpperCamelCase )
def A ( __UpperCamelCase ) -> Any:
if isinstance(__UpperCamelCase , (dict, UserDict) ):
return {k: to_py_obj(__UpperCamelCase ) for k, v in obj.items()}
elif isinstance(__UpperCamelCase , (list, tuple) ):
return [to_py_obj(__UpperCamelCase ) for o in obj]
elif is_tf_tensor(__UpperCamelCase ):
return obj.numpy().tolist()
elif is_torch_tensor(__UpperCamelCase ):
return obj.detach().cpu().tolist()
elif is_jax_tensor(__UpperCamelCase ):
return np.asarray(__UpperCamelCase ).tolist()
elif isinstance(__UpperCamelCase , (np.ndarray, np.number) ): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
def A ( __UpperCamelCase ) -> Dict:
if isinstance(__UpperCamelCase , (dict, UserDict) ):
return {k: to_numpy(__UpperCamelCase ) for k, v in obj.items()}
elif isinstance(__UpperCamelCase , (list, tuple) ):
return np.array(__UpperCamelCase )
elif is_tf_tensor(__UpperCamelCase ):
return obj.numpy()
elif is_torch_tensor(__UpperCamelCase ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(__UpperCamelCase ):
return np.asarray(__UpperCamelCase )
else:
return obj
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def _a ( self : Tuple ):
"""simple docstring"""
A__ = fields(self )
# Safety and consistency checks
if not len(_snake_case ):
raise ValueError(F'''{self.__class__.__name__} has no fields.''' )
if not all(field.default is None for field in class_fields[1:] ):
raise ValueError(F'''{self.__class__.__name__} should not have more than one required field.''' )
A__ = getattr(self , class_fields[0].name )
A__ = all(getattr(self , field.name ) is None for field in class_fields[1:] )
if other_fields_are_none and not is_tensor(_snake_case ):
if isinstance(_snake_case , _snake_case ):
A__ = first_field.items()
A__ = True
else:
try:
A__ = iter(_snake_case )
A__ = True
except TypeError:
A__ = False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(_snake_case ):
if (
not isinstance(_snake_case , (list, tuple) )
or not len(_snake_case ) == 2
or not isinstance(element[0] , _snake_case )
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
A__ = first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
F'''Cannot set key/value for {element}. It needs to be a tuple (key, value).''' )
break
setattr(self , element[0] , element[1] )
if element[1] is not None:
A__ = element[1]
elif first_field is not None:
A__ = first_field
else:
for field in class_fields:
A__ = getattr(self , field.name )
if v is not None:
A__ = v
def __delitem__( self : List[Any] , *_snake_case : str , **_snake_case : Optional[Any] ):
"""simple docstring"""
raise Exception(F'''You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.''' )
def _a ( self : Any , *_snake_case : List[str] , **_snake_case : List[str] ):
"""simple docstring"""
raise Exception(F'''You cannot use ``setdefault`` on a {self.__class__.__name__} instance.''' )
def _a ( self : Optional[int] , *_snake_case : List[Any] , **_snake_case : str ):
"""simple docstring"""
raise Exception(F'''You cannot use ``pop`` on a {self.__class__.__name__} instance.''' )
def _a ( self : Union[str, Any] , *_snake_case : Tuple , **_snake_case : Optional[int] ):
"""simple docstring"""
raise Exception(F'''You cannot use ``update`` on a {self.__class__.__name__} instance.''' )
def __getitem__( self : Optional[int] , _snake_case : List[Any] ):
"""simple docstring"""
if isinstance(_snake_case , _snake_case ):
A__ = dict(self.items() )
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__( self : Union[str, Any] , _snake_case : Tuple , _snake_case : int ):
"""simple docstring"""
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(_snake_case , _snake_case )
super().__setattr__(_snake_case , _snake_case )
def __setitem__( self : List[str] , _snake_case : str , _snake_case : Any ):
"""simple docstring"""
super().__setitem__(_snake_case , _snake_case )
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(_snake_case , _snake_case )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
return tuple(self[k] for k in self.keys() )
class __lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ ):
"""simple docstring"""
@classmethod
def _a ( cls : Any , _snake_case : Optional[int] ):
"""simple docstring"""
raise ValueError(
            F'''{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys() )}''' )
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
A__ : Optional[Any] = "longest"
A__ : str = "max_length"
A__ : Dict = "do_not_pad"
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
A__ : int = "pt"
A__ : List[str] = "tf"
A__ : List[Any] = "np"
A__ : Any = "jax"
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self : Optional[int] , _snake_case : List[ContextManager] ):
"""simple docstring"""
A__ = context_managers
A__ = ExitStack()
def __enter__( self : Tuple ):
"""simple docstring"""
for context_manager in self.context_managers:
self.stack.enter_context(_snake_case )
def __exit__( self : List[Any] , *_snake_case : Tuple , **_snake_case : str ):
"""simple docstring"""
self.stack.__exit__(*_snake_case , **_snake_case )
def A ( __UpperCamelCase ) -> List[Any]:
A__ = infer_framework(__UpperCamelCase )
if framework == "tf":
A__ = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
A__ = inspect.signature(model_class.forward ) # PyTorch models
else:
A__ = inspect.signature(model_class.__call__ ) # Flax models
for p in signature.parameters:
if p == "return_loss" and signature.parameters[p].default is True:
return True
return False
def A ( __UpperCamelCase ) -> Dict:
A__ = model_class.__name__
A__ = infer_framework(__UpperCamelCase )
if framework == "tf":
A__ = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
A__ = inspect.signature(model_class.forward ) # PyTorch models
else:
A__ = inspect.signature(model_class.__call__ ) # Flax models
if "QuestionAnswering" in model_name:
return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
else:
return [p for p in signature.parameters if "label" in p]
def A ( __UpperCamelCase , __UpperCamelCase = "" , __UpperCamelCase = "." ) -> Any:
def _flatten_dict(__UpperCamelCase , __UpperCamelCase="" , __UpperCamelCase="." ):
for k, v in d.items():
A__ = str(__UpperCamelCase ) + delimiter + str(__UpperCamelCase ) if parent_key else k
if v and isinstance(__UpperCamelCase , __UpperCamelCase ):
yield from flatten_dict(__UpperCamelCase , __UpperCamelCase , delimiter=__UpperCamelCase ).items()
else:
yield key, v
return dict(_flatten_dict(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) )
@contextmanager
def A ( __UpperCamelCase , __UpperCamelCase = False ) -> int:
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
def A ( __UpperCamelCase , __UpperCamelCase=None ) -> List[Any]:
if is_numpy_array(__UpperCamelCase ):
return np.transpose(__UpperCamelCase , axes=__UpperCamelCase )
elif is_torch_tensor(__UpperCamelCase ):
return array.T if axes is None else array.permute(*__UpperCamelCase )
elif is_tf_tensor(__UpperCamelCase ):
import tensorflow as tf
return tf.transpose(__UpperCamelCase , perm=__UpperCamelCase )
elif is_jax_tensor(__UpperCamelCase ):
return jnp.transpose(__UpperCamelCase , axes=__UpperCamelCase )
else:
raise ValueError(f'''Type not supported for transpose: {type(__UpperCamelCase )}.''' )
def A ( __UpperCamelCase , __UpperCamelCase ) -> List[Any]:
if is_numpy_array(__UpperCamelCase ):
return np.reshape(__UpperCamelCase , __UpperCamelCase )
elif is_torch_tensor(__UpperCamelCase ):
return array.reshape(*__UpperCamelCase )
elif is_tf_tensor(__UpperCamelCase ):
import tensorflow as tf
return tf.reshape(__UpperCamelCase , __UpperCamelCase )
elif is_jax_tensor(__UpperCamelCase ):
return jnp.reshape(__UpperCamelCase , __UpperCamelCase )
else:
raise ValueError(f'''Type not supported for reshape: {type(__UpperCamelCase )}.''' )
def A ( __UpperCamelCase , __UpperCamelCase=None ) -> Tuple:
if is_numpy_array(__UpperCamelCase ):
return np.squeeze(__UpperCamelCase , axis=__UpperCamelCase )
elif is_torch_tensor(__UpperCamelCase ):
return array.squeeze() if axis is None else array.squeeze(dim=__UpperCamelCase )
elif is_tf_tensor(__UpperCamelCase ):
import tensorflow as tf
return tf.squeeze(__UpperCamelCase , axis=__UpperCamelCase )
elif is_jax_tensor(__UpperCamelCase ):
return jnp.squeeze(__UpperCamelCase , axis=__UpperCamelCase )
else:
raise ValueError(f'''Type not supported for squeeze: {type(__UpperCamelCase )}.''' )
def A ( __UpperCamelCase , __UpperCamelCase ) -> List[Any]:
if is_numpy_array(__UpperCamelCase ):
return np.expand_dims(__UpperCamelCase , __UpperCamelCase )
elif is_torch_tensor(__UpperCamelCase ):
return array.unsqueeze(dim=__UpperCamelCase )
elif is_tf_tensor(__UpperCamelCase ):
import tensorflow as tf
return tf.expand_dims(__UpperCamelCase , axis=__UpperCamelCase )
elif is_jax_tensor(__UpperCamelCase ):
return jnp.expand_dims(__UpperCamelCase , axis=__UpperCamelCase )
else:
raise ValueError(f'''Type not supported for expand_dims: {type(__UpperCamelCase )}.''' )
def A ( __UpperCamelCase ) -> Optional[Any]:
if is_numpy_array(__UpperCamelCase ):
return np.size(__UpperCamelCase )
elif is_torch_tensor(__UpperCamelCase ):
return array.numel()
elif is_tf_tensor(__UpperCamelCase ):
import tensorflow as tf
return tf.size(__UpperCamelCase )
elif is_jax_tensor(__UpperCamelCase ):
return array.size
else:
raise ValueError(f'''Type not supported for expand_dims: {type(__UpperCamelCase )}.''' )
def A ( __UpperCamelCase , __UpperCamelCase ) -> Tuple:
for key, value in auto_map.items():
if isinstance(__UpperCamelCase , (tuple, list) ):
A__ = [f'''{repo_id}--{v}''' if (v is not None and '--' not in v) else v for v in value]
elif value is not None and "--" not in value:
A__ = f'''{repo_id}--{value}'''
return auto_map
def A ( __UpperCamelCase ) -> Optional[int]:
for base_class in inspect.getmro(__UpperCamelCase ):
A__ = base_class.__module__
A__ = base_class.__name__
if module.startswith('tensorflow' ) or module.startswith('keras' ) or name == "TFPreTrainedModel":
return "tf"
elif module.startswith('torch' ) or name == "PreTrainedModel":
return "pt"
elif module.startswith('flax' ) or module.startswith('jax' ) or name == "FlaxPreTrainedModel":
return "flax"
else:
raise TypeError(f'''Could not infer framework from class {model_class}.''' )
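# Quick NumPy illustration of the framework-agnostic shape helpers above
# (each dispatches to the backend equivalent of these calls):
def _shape_helper_examples():
    x = np.ones((2, 1, 3))
    assert np.transpose(x, axes=(2, 0, 1)).shape == (3, 2, 1)
    assert np.squeeze(x, axis=1).shape == (2, 3)
    assert np.expand_dims(x, 0).shape == (1, 2, 1, 3)
    assert np.size(x) == 6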
| 52
|
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCAmelCase ( UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
A__ : int = LongformerTokenizer
A__ : Optional[int] = True
A__ : Any = LongformerTokenizerFast
A__ : Dict = True
def _a ( self : int ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
A__ = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
A__ = dict(zip(_snake_case , range(len(_snake_case ) ) ) )
A__ = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
A__ = {'unk_token': '<unk>'}
A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(_snake_case ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(_snake_case ) )
def _a ( self : int , **_snake_case : Union[str, Any] ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_snake_case )
def _a ( self : Optional[int] , **_snake_case : List[Any] ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **_snake_case )
def _a ( self : Any , _snake_case : Optional[Any] ):
"""simple docstring"""
A__ = 'lower newer'
A__ = 'lower newer'
return input_text, output_text
def _a ( self : Any ):
"""simple docstring"""
A__ = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
A__ = 'lower newer'
A__ = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
A__ = tokenizer.tokenize(_snake_case ) # , add_prefix_space=True)
self.assertListEqual(_snake_case , _snake_case )
A__ = tokens + [tokenizer.unk_token]
A__ = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_snake_case ) , _snake_case )
def _a ( self : List[str] ):
"""simple docstring"""
A__ = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('Hello world!' , add_special_tokens=_snake_case ) , [0, 3_14_14, 2_32, 3_28, 2] )
self.assertListEqual(
tokenizer.encode('Hello world! cécé herlolip 418' , add_special_tokens=_snake_case ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , )
@slow
def _a ( self : List[Any] ):
"""simple docstring"""
A__ = self.tokenizer_class.from_pretrained('allenai/longformer-base-4096' )
A__ = tokenizer.encode('sequence builders' , add_special_tokens=_snake_case )
A__ = tokenizer.encode('multi-sequence build' , add_special_tokens=_snake_case )
A__ = tokenizer.encode(
'sequence builders' , add_special_tokens=_snake_case , add_prefix_space=_snake_case )
A__ = tokenizer.encode(
'sequence builders' , 'multi-sequence build' , add_special_tokens=_snake_case , add_prefix_space=_snake_case )
A__ = tokenizer.build_inputs_with_special_tokens(_snake_case )
A__ = tokenizer.build_inputs_with_special_tokens(_snake_case , _snake_case )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def _a ( self : List[str] ):
"""simple docstring"""
A__ = self.get_tokenizer()
A__ = 'Encode this sequence.'
A__ = tokenizer.byte_encoder[' '.encode('utf-8' )[0]]
# Testing encoder arguments
A__ = tokenizer.encode(_snake_case , add_special_tokens=_snake_case , add_prefix_space=_snake_case )
A__ = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(_snake_case , _snake_case )
A__ = tokenizer.encode(_snake_case , add_special_tokens=_snake_case , add_prefix_space=_snake_case )
A__ = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(_snake_case , _snake_case )
tokenizer.add_special_tokens({'bos_token': '<s>'} )
A__ = tokenizer.encode(_snake_case , add_special_tokens=_snake_case )
A__ = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(_snake_case , _snake_case )
# Testing spaces after special tokens
A__ = '<mask>'
tokenizer.add_special_tokens(
{'mask_token': AddedToken(_snake_case , lstrip=_snake_case , rstrip=_snake_case )} ) # mask token has a left space
A__ = tokenizer.convert_tokens_to_ids(_snake_case )
A__ = 'Encode <mask> sequence'
A__ = 'Encode <mask>sequence'
A__ = tokenizer.encode(_snake_case )
A__ = encoded.index(_snake_case )
A__ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(_snake_case , _snake_case )
A__ = tokenizer.encode(_snake_case )
A__ = encoded.index(_snake_case )
A__ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(_snake_case , _snake_case )
def _a ( self : Dict ):
"""simple docstring"""
pass
def _a ( self : Union[str, Any] ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
A__ = self.rust_tokenizer_class.from_pretrained(_snake_case , **_snake_case )
A__ = self.tokenizer_class.from_pretrained(_snake_case , **_snake_case )
A__ = 'A, <mask> AllenNLP sentence.'
A__ = tokenizer_r.encode_plus(_snake_case , add_special_tokens=_snake_case , return_token_type_ids=_snake_case )
A__ = tokenizer_p.encode_plus(_snake_case , add_special_tokens=_snake_case , return_token_type_ids=_snake_case )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
A__ = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
A__ = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
                # Rust correctly handles the space before the mask while the Python tokenizer doesn't
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
_snake_case , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
_snake_case , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
def _a ( self : List[Any] ):
"""simple docstring"""
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
A__ = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case )
A__ = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
A__ = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['add_prefix_space'] , _snake_case )
self.assertEqual(post_processor_state['add_prefix_space'] , _snake_case )
self.assertEqual(post_processor_state['trim_offsets'] , _snake_case )
def _a ( self : Optional[Any] ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
A__ = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
A__ = F'''{text_of_1_token} {text_of_1_token}'''
A__ = self.rust_tokenizer_class.from_pretrained(
_snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case )
A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_snake_case ) + 1, len(_snake_case ) + 1 + len(_snake_case )) , )
A__ = self.rust_tokenizer_class.from_pretrained(
_snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case )
A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_snake_case ) + 1, len(_snake_case ) + 1 + len(_snake_case )) , )
A__ = self.rust_tokenizer_class.from_pretrained(
_snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case )
A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_snake_case ), len(_snake_case ) + 1 + len(_snake_case )) , )
A__ = self.rust_tokenizer_class.from_pretrained(
_snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case )
A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_snake_case ), len(_snake_case ) + 1 + len(_snake_case )) , )
A__ = F''' {text}'''
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
A__ = self.rust_tokenizer_class.from_pretrained(
_snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case )
A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(_snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_snake_case ) + 1, 1 + len(_snake_case ) + 1 + len(_snake_case )) , )
A__ = self.rust_tokenizer_class.from_pretrained(
_snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case )
A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_snake_case ), 1 + len(_snake_case ) + 1 + len(_snake_case )) , )
A__ = self.rust_tokenizer_class.from_pretrained(
_snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case )
A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_snake_case ), 1 + len(_snake_case ) + 1 + len(_snake_case )) , )
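# Quick-start sketch outside the test harness (assumes network access to the
# Hugging Face Hub):
#   from transformers import LongformerTokenizerFast
#   tok = LongformerTokenizerFast.from_pretrained("allenai/longformer-base-4096")
#   tok("lower newer").input_ids  # token ids wrapped in <s> ... </s>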
| 52
| 1
|
from __future__ import annotations

from collections import deque


def tarjan(g: list[list[int]]) -> list[list[int]]:
    """Return the strongly connected components of the directed graph ``g``
    (given as an adjacency list), using Tarjan's algorithm."""
    n = len(g)
    stack: deque[int] = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v: int, index: int, components: list[list[int]]) -> int:
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True
        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components: list[list[int]] = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)
    return components


def create_graph(n: int, edges: list[tuple[int, int]]) -> list[list[int]]:
    """Build an adjacency list for ``n`` vertices from directed ``edges``."""
    g: list[list[int]] = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g


if __name__ == "__main__":
    # Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)
    assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
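    # Additional illustrative check (not part of the original test): a component
    # is emitted as soon as its root closes, so an isolated vertex forms its
    # own strongly connected component.
    assert tarjan(create_graph(1, [])) == [[0]]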
import pytest

import datasets

# Import fixture modules as plugins
pytest_plugins = ['tests.fixtures.files', 'tests.fixtures.hub', 'tests.fixtures.fsspec']


def pytest_collection_modifyitems(config, items):
    # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ['integration', 'unit']):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    config.addinivalue_line('markers', 'torchaudio_latest: mark test to run with torchaudio>=0.12')


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    # test_hf_cache_home = tmp_path_factory.mktemp("cache")  # TODO: why a cache dir per test function does not work?
    test_hf_cache_home = tmp_path_factory.getbasetemp() / 'cache'
    test_hf_datasets_cache = test_hf_cache_home / 'datasets'
    test_hf_metrics_cache = test_hf_cache_home / 'metrics'
    test_hf_modules_cache = test_hf_cache_home / 'modules'
    monkeypatch.setattr('datasets.config.HF_DATASETS_CACHE', str(test_hf_datasets_cache))
    monkeypatch.setattr('datasets.config.HF_METRICS_CACHE', str(test_hf_metrics_cache))
    monkeypatch.setattr('datasets.config.HF_MODULES_CACHE', str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / 'downloads'
    monkeypatch.setattr('datasets.config.DOWNLOADED_DATASETS_PATH', str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / 'downloads' / 'extracted'
    monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH', str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope='session')
def disable_tqdm_output():
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # don't take tests into account when counting downloads
    monkeypatch.setattr('datasets.config.HF_UPDATE_DOWNLOAD_COUNTS', False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
    # To be removed once SQLAlchemy 2.0 supported
    monkeypatch.setattr('sqlalchemy.util.deprecations.SILENCE_UBER_WARNING', True)
from __future__ import annotations


def encode(plain: str) -> list[int]:
    """Map each lowercase letter to its position in the alphabet (a=1 .. z=26)."""
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    """Inverse of ``encode``: map alphabet positions back to lowercase letters."""
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input('-> ').strip().lower())
    print('Encoded: ', encoded)
    print('Decoded:', decode(encoded))


if __name__ == "__main__":
    main()
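    # Quick illustrative round-trip (beyond the original script): 'hello'
    # encodes to [8, 5, 12, 12, 15] and decodes back unchanged.
    assert decode(encode('hello')) == 'hello'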
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def A ( __UpperCamelCase , __UpperCamelCase ) -> Tuple:
A__ = args.log_outputs
A__ = '_'.join(args.dataset.split('/' ) + [args.config, args.split] )
# load metric
A__ = load_metric('wer' )
A__ = load_metric('cer' )
# compute metrics
A__ = wer.compute(references=result['target'] , predictions=result['prediction'] )
A__ = cer.compute(references=result['target'] , predictions=result['prediction'] )
# print & log results
A__ = f'''WER: {wer_result}\nCER: {cer_result}'''
print(__UpperCamelCase )
with open(f'''{dataset_id}_eval_results.txt''' , 'w' ) as f:
f.write(__UpperCamelCase )
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
A__ = f'''log_{dataset_id}_predictions.txt'''
A__ = f'''log_{dataset_id}_targets.txt'''
with open(__UpperCamelCase , 'w' ) as p, open(__UpperCamelCase , 'w' ) as t:
# mapping function to write output
def write_to_file(__UpperCamelCase , __UpperCamelCase ):
p.write(f'''{i}''' + '\n' )
p.write(batch['prediction'] + '\n' )
t.write(f'''{i}''' + '\n' )
t.write(batch['target'] + '\n' )
result.map(__UpperCamelCase , with_indices=__UpperCamelCase )
def A ( __UpperCamelCase ) -> str:
A__ = '[,?.!\-\;\:"“%‘”�—’…–]' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
A__ = re.sub(__UpperCamelCase , '' , text.lower() )
# In addition, we can normalize the target text, e.g. removing newline characters.
# note that order is important here!
A__ = ['\n\n', '\n', ' ', ' ']
for t in token_sequences_to_ignore:
A__ = ' '.join(text.split(__UpperCamelCase ) )
return text
def A ( __UpperCamelCase ) -> Union[str, Any]:
# load dataset
A__ = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=__UpperCamelCase )
# for testing: only process the first few examples as a test
# dataset = dataset.select(range(10))
# load processor
A__ = AutoFeatureExtractor.from_pretrained(args.model_id )
A__ = feature_extractor.sampling_rate
# resample audio
A__ = dataset.cast_column('audio' , Audio(sampling_rate=__UpperCamelCase ) )
# load eval pipeline
if args.device is None:
A__ = 0 if torch.cuda.is_available() else -1
A__ = pipeline('automatic-speech-recognition' , model=args.model_id , device=args.device )
# map function to decode audio
def map_to_pred(__UpperCamelCase ):
A__ = asr(
batch['audio']['array'] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
A__ = prediction['text']
A__ = normalize_text(batch['sentence'] )
return batch
# run inference on all examples
A__ = dataset.map(__UpperCamelCase , remove_columns=dataset.column_names )
# compute and log_results
# do not change function below
log_results(__UpperCamelCase , __UpperCamelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument(
'''--model_id''', type=str, required=True, help='''Model identifier. Should be loadable with 🤗 Transformers'''
)
parser.add_argument(
'''--dataset''',
type=str,
required=True,
help='''Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets''',
)
parser.add_argument(
'''--config''', type=str, required=True, help='''Config of the dataset. *E.g.* `\'en\'` for Common Voice'''
)
parser.add_argument('''--split''', type=str, required=True, help='''Split of the dataset. *E.g.* `\'test\'`''')
parser.add_argument(
'''--chunk_length_s''', type=float, default=None, help='''Chunk length in seconds. Defaults to None (no chunking).'''
)
parser.add_argument(
'''--stride_length_s''', type=float, default=None, help='''Stride of the audio chunks in seconds. Defaults to None (chosen by the pipeline).'''
)
parser.add_argument(
'''--log_outputs''', action='''store_true''', help='''If defined, write outputs to log file for analysis.'''
)
parser.add_argument(
'''--device''',
type=int,
default=None,
help='''The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.''',
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
main(args)
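# Example invocation (illustrative; the script name, model id and dataset id are
# placeholders, not prescribed by this file):
#   python eval.py --model_id facebook/wav2vec2-base-960h \
#       --dataset mozilla-foundation/common_voice_7_0 --config en --split test --log_outputs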
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
SCREAMING_SNAKE_CASE__ = {
'''text_branch''': '''text_model''',
'''audio_branch''': '''audio_model.audio_encoder''',
'''attn''': '''attention.self''',
'''self.proj''': '''output.dense''',
'''attention.self_mask''': '''attn_mask''',
'''mlp.fc1''': '''intermediate.dense''',
'''mlp.fc2''': '''output.dense''',
'''norm1''': '''layernorm_before''',
'''norm2''': '''layernorm_after''',
'''bn0''': '''batch_norm''',
}
SCREAMING_SNAKE_CASE__ = AutoFeatureExtractor.from_pretrained('''laion/clap-htsat-unfused''', truncation='''rand_trunc''')
def A ( __UpperCamelCase , __UpperCamelCase=False ) -> Tuple:
A__ , A__ = create_model(
'HTSAT-tiny' , 'roberta' , __UpperCamelCase , precision='fp32' , device='cuda:0' if torch.cuda.is_available() else 'cpu' , enable_fusion=__UpperCamelCase , fusion_type='aff_2d' if enable_fusion else None , )
return model, model_cfg
def A ( __UpperCamelCase ) -> Any:
A__ = {}
A__ = r'.*sequential.(\d+).*'
A__ = r'.*_projection.(\d+).*'
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
A__ = key.replace(__UpperCamelCase , __UpperCamelCase )
if re.match(__UpperCamelCase , __UpperCamelCase ):
# replace sequential layers with list
A__ = re.match(__UpperCamelCase , __UpperCamelCase ).group(1 )
A__ = key.replace(f'''sequential.{sequential_layer}.''' , f'''layers.{int(__UpperCamelCase )//3}.linear.''' )
elif re.match(__UpperCamelCase , __UpperCamelCase ):
A__ = int(re.match(__UpperCamelCase , __UpperCamelCase ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
A__ = 1 if projecton_layer == 0 else 2
A__ = key.replace(f'''_projection.{projecton_layer}.''' , f'''_projection.linear{transformers_projection_layer}.''' )
if "audio" in key and "qkv" in key:
# split qkv into query key and value
A__ = value
A__ = mixed_qkv.size(0 ) // 3
A__ = mixed_qkv[:qkv_dim]
A__ = mixed_qkv[qkv_dim : qkv_dim * 2]
A__ = mixed_qkv[qkv_dim * 2 :]
A__ = query_layer
A__ = key_layer
A__ = value_layer
else:
A__ = value
return model_state_dict
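# Note on the qkv split above (illustrative): for hidden size d the fused
# attention weight has shape (3*d, d); rows [0:d] hold the query projection,
# rows [d:2*d] the key projection and rows [2*d:3*d] the value projection,
# which is why mixed_qkv.size(0) is divided by 3.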
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False ) -> Union[str, Any]:
A__ , A__ = init_clap(__UpperCamelCase , enable_fusion=__UpperCamelCase )
clap_model.eval()
A__ = clap_model.state_dict()
A__ = rename_state_dict(__UpperCamelCase )
A__ = ClapConfig()
A__ = enable_fusion
A__ = ClapModel(__UpperCamelCase )
# ignore the spectrogram embedding layer
model.load_state_dict(__UpperCamelCase , strict=__UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
transformers_config.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument('''--enable_fusion''', action='''store_true''', help='''Whether to enable fusion or not''')
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
def A ( __UpperCamelCase ) -> YolosConfig:
A__ = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
A__ = 192
A__ = 768
A__ = 12
A__ = 3
A__ = [800, 1_333]
A__ = False
elif yolos_name == "yolos_s_dWr":
A__ = 330
A__ = 14
A__ = 6
A__ = 1_320
elif "yolos_s" in yolos_name:
A__ = 384
A__ = 1_536
A__ = 12
A__ = 6
elif "yolos_b" in yolos_name:
A__ = [800, 1_344]
A__ = 91
A__ = 'huggingface/label-files'
A__ = 'coco-detection-id2label.json'
A__ = json.load(open(hf_hub_download(__UpperCamelCase , __UpperCamelCase , repo_type='dataset' ) , 'r' ) )
A__ = {int(k): v for k, v in idalabel.items()}
A__ = idalabel
A__ = {v: k for k, v in idalabel.items()}
return config
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = False ) -> str:
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A__ = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
A__ = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
A__ = in_proj_weight[: config.hidden_size, :]
A__ = in_proj_bias[: config.hidden_size]
A__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A__ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A__ = in_proj_weight[-config.hidden_size :, :]
A__ = in_proj_bias[-config.hidden_size :]
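# The slicing above mirrors timm's fused qkv layout (illustrative): the first
# hidden_size rows of blocks.{i}.attn.qkv.weight are the query weights, the
# next hidden_size rows the keys, and the last hidden_size rows the values.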
def A ( __UpperCamelCase ) -> str:
if "backbone" in name:
A__ = name.replace('backbone' , 'vit' )
if "cls_token" in name:
A__ = name.replace('cls_token' , 'embeddings.cls_token' )
if "det_token" in name:
A__ = name.replace('det_token' , 'embeddings.detection_tokens' )
if "mid_pos_embed" in name:
A__ = name.replace('mid_pos_embed' , 'encoder.mid_position_embeddings' )
if "pos_embed" in name:
A__ = name.replace('pos_embed' , 'embeddings.position_embeddings' )
if "patch_embed.proj" in name:
A__ = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "blocks" in name:
A__ = name.replace('blocks' , 'encoder.layer' )
if "attn.proj" in name:
A__ = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
A__ = name.replace('attn' , 'attention.self' )
if "norm1" in name:
A__ = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
A__ = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
A__ = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
A__ = name.replace('mlp.fc2' , 'output.dense' )
if "class_embed" in name:
A__ = name.replace('class_embed' , 'class_labels_classifier' )
if "bbox_embed" in name:
A__ = name.replace('bbox_embed' , 'bbox_predictor' )
if "vit.norm" in name:
A__ = name.replace('vit.norm' , 'vit.layernorm' )
return name
def A ( __UpperCamelCase , __UpperCamelCase ) -> dict:
for key in orig_state_dict.copy().keys():
A__ = orig_state_dict.pop(__UpperCamelCase )
if "qkv" in key:
A__ = key.split('.' )
A__ = int(key_split[2] )
A__ = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
A__ = val[:dim, :]
A__ = val[
dim : dim * 2, :
]
A__ = val[-dim:, :]
else:
A__ = val[:dim]
A__ = val[dim : dim * 2]
A__ = val[-dim:]
else:
A__ = val
return orig_state_dict
def A ( ) -> torch.Tensor:
A__ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
A__ = Image.open(requests.get(__UpperCamelCase , stream=__UpperCamelCase ).raw )
return im
@torch.no_grad()
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = False ) -> List[str]:
A__ = get_yolos_config(__UpperCamelCase )
# load original state_dict
A__ = torch.load(__UpperCamelCase , map_location='cpu' )['model']
# load 🤗 model
A__ = YolosForObjectDetection(__UpperCamelCase )
model.eval()
A__ = convert_state_dict(__UpperCamelCase , __UpperCamelCase )
model.load_state_dict(__UpperCamelCase )
# Check outputs on an image, prepared by YolosImageProcessor
A__ = 800 if yolos_name != 'yolos_ti' else 512
A__ = YolosImageProcessor(format='coco_detection' , size=__UpperCamelCase )
A__ = image_processor(images=prepare_img() , return_tensors='pt' )
A__ = model(**__UpperCamelCase )
A__ , A__ = outputs.logits, outputs.pred_boxes
A__ , A__ = None, None
if yolos_name == "yolos_ti":
A__ = torch.tensor(
[[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] )
A__ = torch.tensor(
[[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] )
elif yolos_name == "yolos_s_200_pre":
A__ = torch.tensor(
[[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] )
A__ = torch.tensor(
[[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] )
elif yolos_name == "yolos_s_300_pre":
A__ = torch.tensor(
[[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] )
A__ = torch.tensor(
[[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] )
elif yolos_name == "yolos_s_dWr":
A__ = torch.tensor(
[[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] )
A__ = torch.tensor(
[[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] )
elif yolos_name == "yolos_base":
A__ = torch.tensor(
[[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] )
A__ = torch.tensor(
[[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] )
else:
raise ValueError(f'''Unknown yolos_name: {yolos_name}''' )
assert torch.allclose(logits[0, :3, :3] , __UpperCamelCase , atol=1E-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , __UpperCamelCase , atol=1E-4 )
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
print(f'''Saving model {yolos_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__UpperCamelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__UpperCamelCase )
if push_to_hub:
A__ = {
'yolos_ti': 'yolos-tiny',
'yolos_s_200_pre': 'yolos-small',
'yolos_s_300_pre': 'yolos-small-300',
'yolos_s_dWr': 'yolos-small-dwr',
'yolos_base': 'yolos-base',
}
print('Pushing to the hub...' )
A__ = model_mapping[yolos_name]
image_processor.push_to_hub(__UpperCamelCase , organization='hustvl' )
model.push_to_hub(__UpperCamelCase , organization='hustvl' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
import copy
import os
from typing import List, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
'''kakaobrain/align-base''': '''https://huggingface.co/kakaobrain/align-base/resolve/main/config.json''',
}
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
A__ : List[str] = "align_text_model"
def __init__( self : Optional[int] , _snake_case : List[str]=3_05_22 , _snake_case : int=7_68 , _snake_case : Any=12 , _snake_case : List[Any]=12 , _snake_case : Optional[int]=30_72 , _snake_case : List[str]="gelu" , _snake_case : str=0.1 , _snake_case : Optional[Any]=0.1 , _snake_case : str=5_12 , _snake_case : Union[str, Any]=2 , _snake_case : List[Any]=0.02 , _snake_case : str=1E-12 , _snake_case : Any=0 , _snake_case : Tuple="absolute" , _snake_case : Union[str, Any]=True , **_snake_case : str , ):
"""simple docstring"""
super().__init__(**_snake_case )
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = hidden_act
A__ = intermediate_size
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = type_vocab_size
A__ = initializer_range
A__ = layer_norm_eps
A__ = position_embedding_type
A__ = use_cache
A__ = pad_token_id
@classmethod
def _a ( cls : str , _snake_case : Union[str, os.PathLike] , **_snake_case : Dict ):
"""simple docstring"""
cls._set_token_in_kwargs(_snake_case )
A__ , A__ = cls.get_config_dict(_snake_case , **_snake_case )
# get the text config dict if we are loading from AlignConfig
if config_dict.get('model_type' ) == "align":
A__ = config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(_snake_case , **_snake_case )
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
A__ : str = "align_vision_model"
def __init__( self : Optional[Any] , _snake_case : int = 3 , _snake_case : int = 6_00 , _snake_case : float = 2.0 , _snake_case : float = 3.1 , _snake_case : int = 8 , _snake_case : List[int] = [3, 3, 5, 3, 5, 5, 3] , _snake_case : List[int] = [32, 16, 24, 40, 80, 1_12, 1_92] , _snake_case : List[int] = [16, 24, 40, 80, 1_12, 1_92, 3_20] , _snake_case : List[int] = [] , _snake_case : List[int] = [1, 2, 2, 2, 1, 2, 1] , _snake_case : List[int] = [1, 2, 2, 3, 3, 4, 1] , _snake_case : List[int] = [1, 6, 6, 6, 6, 6, 6] , _snake_case : float = 0.25 , _snake_case : str = "swish" , _snake_case : int = 25_60 , _snake_case : str = "mean" , _snake_case : float = 0.02 , _snake_case : float = 0.001 , _snake_case : float = 0.99 , _snake_case : float = 0.2 , **_snake_case : int , ):
"""simple docstring"""
super().__init__(**_snake_case )
A__ = num_channels
A__ = image_size
A__ = width_coefficient
A__ = depth_coefficient
A__ = depth_divisor
A__ = kernel_sizes
A__ = in_channels
A__ = out_channels
A__ = depthwise_padding
A__ = strides
A__ = num_block_repeats
A__ = expand_ratios
A__ = squeeze_expansion_ratio
A__ = hidden_act
A__ = hidden_dim
A__ = pooling_type
A__ = initializer_range
A__ = batch_norm_eps
A__ = batch_norm_momentum
A__ = drop_connect_rate
A__ = sum(_snake_case ) * 4
@classmethod
def _a ( cls : Any , _snake_case : Union[str, os.PathLike] , **_snake_case : Optional[Any] ):
"""simple docstring"""
cls._set_token_in_kwargs(_snake_case )
A__ , A__ = cls.get_config_dict(_snake_case , **_snake_case )
# get the vision config dict if we are loading from AlignConfig
if config_dict.get('model_type' ) == "align":
A__ = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(_snake_case , **_snake_case )
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
A__ : Optional[int] = "align"
A__ : str = True
def __init__( self : Any , _snake_case : List[str]=None , _snake_case : List[str]=None , _snake_case : str=6_40 , _snake_case : Union[str, Any]=1.0 , _snake_case : int=0.02 , **_snake_case : Tuple , ):
"""simple docstring"""
super().__init__(**_snake_case )
if text_config is None:
A__ = {}
logger.info('text_config is None. Initializing the AlignTextConfig with default values.' )
if vision_config is None:
A__ = {}
logger.info('vision_config is None. Initializing the AlignVisionConfig with default values.' )
A__ = AlignTextConfig(**_snake_case )
A__ = AlignVisionConfig(**_snake_case )
A__ = projection_dim
A__ = temperature_init_value
A__ = initializer_range
@classmethod
def _a ( cls : Optional[int] , _snake_case : AlignTextConfig , _snake_case : AlignVisionConfig , **_snake_case : Union[str, Any] ):
"""simple docstring"""
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_snake_case )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A__ = copy.deepcopy(self.__dict__ )
A__ = self.text_config.to_dict()
A__ = self.vision_config.to_dict()
A__ = self.__class__.model_type
return output
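# Minimal usage sketch (illustrative; the class names are taken from the type
# annotations above and the classmethod name from the upstream transformers API,
# since the defs here are shown with placeholder names):
#   text_config = AlignTextConfig()
#   vision_config = AlignVisionConfig()
#   config = AlignConfig.from_text_vision_configs(text_config, vision_config)
#   config.to_dict()  # nested dict with 'text_config' and 'vision_config' keys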
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
'''config''': [
'''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''',
'''OnnxConfig''',
'''OnnxConfigWithPast''',
'''OnnxSeq2SeqConfigWithPast''',
'''PatchingSpec''',
],
'''convert''': ['''export''', '''validate_model_outputs'''],
'''features''': ['''FeaturesManager'''],
'''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
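# With the _LazyModule above, submodules are only imported on first attribute
# access, e.g. (illustrative):
#   from transformers.onnx import OnnxConfig  # imports .config lazily, nothing else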
import argparse
import math
import os

import torch
from neural_compressor.utils.pytorch import load
from PIL import Image

from transformers import CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-m',
        '--pretrained_model_name_or_path',
        type=str,
        default=None,
        required=True,
        help='Path to pretrained model or model identifier from huggingface.co/models.',
    )
    parser.add_argument(
        '-c',
        '--caption',
        type=str,
        default='robotic cat with wings',
        help='Text used to generate images.',
    )
    parser.add_argument(
        '-n',
        '--images_num',
        type=int,
        default=4,
        help='How many images to generate.',
    )
    parser.add_argument(
        '-s',
        '--seed',
        type=int,
        default=42,
        help='Seed for random process.',
    )
    parser.add_argument(
        '-ci',
        '--cuda_id',
        type=int,
        default=0,
        help='cuda_id.',
    )
    args = parser.parse_args()
    return args


def image_grid(imgs, rows, cols):
    if not len(imgs) == rows * cols:
        raise ValueError('The specified number of rows and columns is not correct.')
    w, h = imgs[0].size
    grid = Image.new('RGB', size=(cols * w, rows * h))
    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid


def generate_images(
    pipeline,
    prompt='robotic cat with wings',
    guidance_scale=7.5,
    num_inference_steps=50,
    num_images_per_prompt=1,
    seed=42,
):
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        generator=generator,
        num_images_per_prompt=num_images_per_prompt,
    ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images


args = parse_args()
# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='tokenizer')
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='text_encoder')
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='vae')
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='unet')
pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, 'best_model.pt')):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, 'unet', unet)
else:
    unet = unet.to(torch.device('cuda', args.cuda_id))
pipeline = pipeline.to(unet.device)
grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, '{}.png'.format('_'.join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, '_'.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, '{}.png'.format(idx + 1)))
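# Example invocation (illustrative; the script name and model directory are placeholders):
#   python text_to_images.py -m ./stable-diffusion-model-dir -c 'robotic cat with wings' -n 4 -s 42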
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {'''vocab_file''': '''sentencepiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
SCREAMING_SNAKE_CASE__ = {
'''vocab_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
},
'''tokenizer_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/tokenizer.json''',
},
}
SCREAMING_SNAKE_CASE__ = {
'''google/rembert''': 2_5_6,
}
SCREAMING_SNAKE_CASE__ = '''▁'''
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
A__ : Any = VOCAB_FILES_NAMES
A__ : str = PRETRAINED_VOCAB_FILES_MAP
A__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : int = RemBertTokenizer
def __init__( self : Union[str, Any] , _snake_case : Any=None , _snake_case : Optional[Any]=None , _snake_case : Any=True , _snake_case : Optional[int]=True , _snake_case : Dict=False , _snake_case : Dict="[CLS]" , _snake_case : List[Any]="[SEP]" , _snake_case : Union[str, Any]="<unk>" , _snake_case : List[str]="[SEP]" , _snake_case : List[str]="<pad>" , _snake_case : str="[CLS]" , _snake_case : Any="[MASK]" , **_snake_case : Any , ):
"""simple docstring"""
A__ = AddedToken(_snake_case , lstrip=_snake_case , rstrip=_snake_case ) if isinstance(_snake_case , _snake_case ) else mask_token
super().__init__(
_snake_case , tokenizer_file=_snake_case , do_lower_case=_snake_case , remove_space=_snake_case , keep_accents=_snake_case , bos_token=_snake_case , eos_token=_snake_case , unk_token=_snake_case , sep_token=_snake_case , pad_token=_snake_case , cls_token=_snake_case , mask_token=_snake_case , **_snake_case , )
A__ = do_lower_case
A__ = remove_space
A__ = keep_accents
A__ = vocab_file
A__ = False if not self.vocab_file else True
def _a ( self : Any , _snake_case : List[int] , _snake_case : Optional[List[int]] = None ):
"""simple docstring"""
A__ = [self.sep_token_id]
A__ = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _a ( self : Tuple , _snake_case : List[int] , _snake_case : Optional[List[int]] = None , _snake_case : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_snake_case )) + [1] + ([0] * len(_snake_case )) + [1]
return [1] + ([0] * len(_snake_case )) + [1]
def _a ( self : Dict , _snake_case : List[int] , _snake_case : Optional[List[int]] = None ):
"""simple docstring"""
A__ = [self.sep_token_id]
A__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _a ( self : Any , _snake_case : str , _snake_case : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(_snake_case ):
logger.error('Vocabulary path ({}) should be a directory'.format(_snake_case ) )
return
A__ = os.path.join(
_snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_snake_case ):
copyfile(self.vocab_file , _snake_case )
return (out_vocab_file,)
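# Sketch of the special-token layouts produced by the methods above (illustrative):
#   single sequence:    [CLS] X [SEP]
#   pair of sequences:  [CLS] A [SEP] B [SEP]
# The token-type-id method assigns 0s to the first segment (including [CLS] and
# the first [SEP]) and 1s to the second segment.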
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
def A ( __UpperCamelCase ) -> Dict:
A__ = 'huggingface/label-files'
A__ = 'imagenet-1k-id2label.json'
A__ = json.load(open(hf_hub_download(__UpperCamelCase , __UpperCamelCase , repo_type='dataset' ) , 'r' ) )
A__ = {int(k): v for k, v in idalabel.items()}
A__ = {v: k for k, v in idalabel.items()}
A__ = 'std_conv' if 'bit' in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
A__ = BitConfig(
conv_layer=__UpperCamelCase , num_labels=1_000 , id2label=__UpperCamelCase , label2id=__UpperCamelCase , )
return config
def A ( __UpperCamelCase ) -> List[str]:
if "stem.conv" in name:
A__ = name.replace('stem.conv' , 'bit.embedder.convolution' )
if "blocks" in name:
A__ = name.replace('blocks' , 'layers' )
if "head.fc" in name:
A__ = name.replace('head.fc' , 'classifier.1' )
if name.startswith('norm' ):
A__ = 'bit.' + name
if "bit" not in name and "classifier" not in name:
A__ = 'bit.encoder.' + name
return name
def A ( ) -> List[Any]:
A__ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
A__ = Image.open(requests.get(__UpperCamelCase , stream=__UpperCamelCase ).raw )
return im
@torch.no_grad()
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False ) -> Tuple:
A__ = get_config(__UpperCamelCase )
# load original model from timm
A__ = create_model(__UpperCamelCase , pretrained=__UpperCamelCase )
timm_model.eval()
# load state_dict of original model
A__ = timm_model.state_dict()
for key in state_dict.copy().keys():
A__ = state_dict.pop(__UpperCamelCase )
A__ = val.squeeze() if 'head' in key else val
# load HuggingFace model
A__ = BitForImageClassification(__UpperCamelCase )
model.eval()
model.load_state_dict(__UpperCamelCase )
# create image processor
A__ = create_transform(**resolve_data_config({} , model=__UpperCamelCase ) )
A__ = transform.transforms
A__ = {
'bilinear': PILImageResampling.BILINEAR,
'bicubic': PILImageResampling.BICUBIC,
'nearest': PILImageResampling.NEAREST,
}
A__ = BitImageProcessor(
do_resize=__UpperCamelCase , size={'shortest_edge': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=__UpperCamelCase , crop_size={'height': timm_transforms[1].size[0], 'width': timm_transforms[1].size[1]} , do_normalize=__UpperCamelCase , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
A__ = prepare_img()
A__ = transform(__UpperCamelCase ).unsqueeze(0 )
A__ = processor(__UpperCamelCase , return_tensors='pt' ).pixel_values
# verify pixel values
assert torch.allclose(__UpperCamelCase , __UpperCamelCase )
# verify logits
with torch.no_grad():
A__ = model(__UpperCamelCase )
A__ = outputs.logits
print('Logits:' , logits[0, :3] )
print('Predicted class:' , model.config.idalabel[logits.argmax(-1 ).item()] )
A__ = timm_model(__UpperCamelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__UpperCamelCase , outputs.logits , atol=1E-3 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
print(f'''Saving model {model_name} and processor to {pytorch_dump_folder_path}''' )
model.save_pretrained(__UpperCamelCase )
processor.save_pretrained(__UpperCamelCase )
if push_to_hub:
print(f'''Pushing model {model_name} and processor to the hub''' )
model.push_to_hub(f'''ybelkada/{model_name}''' )
processor.push_to_hub(f'''ybelkada/{model_name}''' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''resnetv2_50x1_bitm''',
type=str,
help='''Name of the BiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model to the hub.''',
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
SCREAMING_SNAKE_CASE__ = '''sshleifer/bart-tiny-random'''
SCREAMING_SNAKE_CASE__ = '''patrickvonplaten/t5-tiny-random'''
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _a ( self : Optional[int] ):
"""simple docstring"""
return AutoConfig.from_pretrained(_snake_case )
def _a ( self : Optional[Any] ):
"""simple docstring"""
A__ , *A__ = create_student_by_copying_alternating_layers(_snake_case , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.num_hidden_layers , 1 )
def _a ( self : Optional[int] ):
"""simple docstring"""
A__ , *A__ = create_student_by_copying_alternating_layers(_snake_case , tempfile.mkdtemp() , e=1 , d=_snake_case )
def _a ( self : int ):
"""simple docstring"""
A__ , *A__ = create_student_by_copying_alternating_layers(_snake_case , tempfile.mkdtemp() , e=1 , d=_snake_case )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
def _a ( self : str ):
"""simple docstring"""
A__ , *A__ = create_student_by_copying_alternating_layers(_snake_case , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , 1 )
def _a ( self : str ):
"""simple docstring"""
with self.assertRaises(_snake_case ):
create_student_by_copying_alternating_layers(_snake_case , tempfile.mkdtemp() , e=_snake_case , d=_snake_case )
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip
SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def A ( __UpperCamelCase ) -> Tuple:
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> List[Any]:
return max(metric_fn(__UpperCamelCase , __UpperCamelCase ) for gt in ground_truths )
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> List[str]:
A__ = [line.strip() for line in open(__UpperCamelCase , 'r' ).readlines()]
A__ = []
if args.gold_data_mode == "qa":
A__ = pd.read_csv(__UpperCamelCase , sep='\t' , header=__UpperCamelCase )
for answer_list in data[1]:
A__ = ast.literal_eval(__UpperCamelCase )
answers.append(__UpperCamelCase )
else:
A__ = [line.strip() for line in open(__UpperCamelCase , 'r' ).readlines()]
A__ = [[reference] for reference in references]
A__ = A__ = A__ = 0
for prediction, ground_truths in zip(__UpperCamelCase , __UpperCamelCase ):
total += 1
em += metric_max_over_ground_truths(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
fa += metric_max_over_ground_truths(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
A__ = 100.0 * em / total
A__ = 100.0 * fa / total
logger.info(f'''F1: {fa:.2f}''' )
logger.info(f'''EM: {em:.2f}''' )
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Optional[int]:
A__ = args.k
A__ = [line.strip() for line in open(__UpperCamelCase , 'r' ).readlines()]
A__ = [line.strip() for line in open(__UpperCamelCase , 'r' ).readlines()]
A__ = A__ = 0
for hypo, reference in zip(__UpperCamelCase , __UpperCamelCase ):
A__ = set(hypo.split('\t' )[:k] )
A__ = set(reference.split('\t' ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
A__ = 100.0 * em / total
logger.info(f'''Precision@{k}: {em: .2f}''' )
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Union[str, Any]:
def strip_title(__UpperCamelCase ):
if title.startswith('"' ):
A__ = title[1:]
if title.endswith('"' ):
A__ = title[:-1]
return title
A__ = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
__UpperCamelCase , return_tensors='pt' , padding=__UpperCamelCase , truncation=__UpperCamelCase , )['input_ids'].to(args.device )
A__ = rag_model.rag.question_encoder(__UpperCamelCase )
A__ = question_enc_outputs[0]
A__ = rag_model.retriever(
__UpperCamelCase , question_enc_pool_output.cpu().detach().to(torch.float32 ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='pt' , )
A__ = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
A__ = []
for docs in all_docs:
A__ = [strip_title(__UpperCamelCase ) for title in docs['title']]
provenance_strings.append('\t'.join(__UpperCamelCase ) )
return provenance_strings
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Optional[Any]:
with torch.no_grad():
A__ = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
__UpperCamelCase , return_tensors='pt' , padding=__UpperCamelCase , truncation=__UpperCamelCase )
A__ = inputs_dict.input_ids.to(args.device )
A__ = inputs_dict.attention_mask.to(args.device )
A__ = rag_model.generate( # rag_model overwrites generate
__UpperCamelCase , attention_mask=__UpperCamelCase , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=__UpperCamelCase , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
A__ = rag_model.retriever.generator_tokenizer.batch_decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase )
if args.print_predictions:
for q, a in zip(__UpperCamelCase , __UpperCamelCase ):
logger.info('Q: {} - A: {}'.format(__UpperCamelCase , __UpperCamelCase ) )
return answers
def A ( ) -> Any:
A__ = argparse.ArgumentParser()
parser.add_argument(
'--model_type' , choices=['rag_sequence', 'rag_token', 'bart'] , type=__UpperCamelCase , help=(
'RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the'
' model_name_or_path'
) , )
parser.add_argument(
'--index_name' , default=__UpperCamelCase , choices=['exact', 'compressed', 'legacy'] , type=__UpperCamelCase , help='RAG model retriever type' , )
parser.add_argument(
'--index_path' , default=__UpperCamelCase , type=__UpperCamelCase , help='Path to the retrieval index' , )
parser.add_argument('--n_docs' , default=5 , type=__UpperCamelCase , help='Number of retrieved docs' )
parser.add_argument(
'--model_name_or_path' , default=__UpperCamelCase , type=__UpperCamelCase , required=__UpperCamelCase , help='Path to pretrained checkpoints or model identifier from huggingface.co/models' , )
parser.add_argument(
'--eval_mode' , choices=['e2e', 'retrieval'] , default='e2e' , type=__UpperCamelCase , help=(
'Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'
' precision@k.'
) , )
parser.add_argument('--k' , default=1 , type=__UpperCamelCase , help='k for the precision@k calculation' )
parser.add_argument(
'--evaluation_set' , default=__UpperCamelCase , type=__UpperCamelCase , required=__UpperCamelCase , help='Path to a file containing evaluation samples' , )
parser.add_argument(
'--gold_data_path' , default=__UpperCamelCase , type=__UpperCamelCase , required=__UpperCamelCase , help='Path to a tab-separated file with gold samples' , )
parser.add_argument(
'--gold_data_mode' , default='qa' , type=__UpperCamelCase , choices=['qa', 'ans'] , help=(
'Format of the gold data file. '
'qa - a single line in the following format: question [tab] answer_list. '
'ans - a single line of the gold file contains the expected answer string.'
) , )
parser.add_argument(
'--predictions_path' , type=__UpperCamelCase , default='predictions.txt' , help='Name of the predictions file, to be stored in the checkpoints directory' , )
parser.add_argument(
'--eval_all_checkpoints' , action='store_true' , help='Evaluate all checkpoints starting with the same prefix as model_name and ending with the step number' , )
parser.add_argument(
'--eval_batch_size' , default=8 , type=__UpperCamelCase , help='Batch size per GPU/CPU for evaluation.' , )
parser.add_argument(
'--recalculate' , help='Recalculate predictions even if the prediction file exists' , action='store_true' , )
parser.add_argument(
'--num_beams' , default=4 , type=__UpperCamelCase , help='Number of beams to be used when generating answers' , )
parser.add_argument('--min_length' , default=1 , type=__UpperCamelCase , help='Min length of the generated answers' )
parser.add_argument('--max_length' , default=50 , type=__UpperCamelCase , help='Max length of the generated answers' )
parser.add_argument(
'--print_predictions' , action='store_true' , help='If True, prints predictions while evaluating.' , )
parser.add_argument(
'--print_docs' , action='store_true' , help='If True, prints docs retrieved while generating.' , )
A__ = parser.parse_args()
A__ = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
return args
def A ( __UpperCamelCase ) -> int:
A__ = {}
if args.model_type is None:
A__ = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith('rag' ):
A__ = RagTokenForGeneration if args.model_type == 'rag_token' else RagSequenceForGeneration
A__ = args.n_docs
if args.index_name is not None:
A__ = args.index_name
if args.index_path is not None:
A__ = args.index_path
else:
A__ = BartForConditionalGeneration
A__ = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info('Evaluate the following checkpoints: %s' , __UpperCamelCase )
A__ = get_scores if args.eval_mode == 'e2e' else get_precision_at_k
A__ = evaluate_batch_e2e if args.eval_mode == 'e2e' else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info('Calculating metrics based on an existing predictions file: {}'.format(args.predictions_path ) )
score_fn(__UpperCamelCase , args.predictions_path , args.gold_data_path )
continue
logger.info('***** Running evaluation for {} *****'.format(__UpperCamelCase ) )
logger.info(' Batch size = %d' , args.eval_batch_size )
logger.info(' Predictions will be stored under {}'.format(args.predictions_path ) )
if args.model_type.startswith('rag' ):
A__ = RagRetriever.from_pretrained(__UpperCamelCase , **__UpperCamelCase )
A__ = model_class.from_pretrained(__UpperCamelCase , retriever=__UpperCamelCase , **__UpperCamelCase )
model.retriever.init_retrieval()
else:
A__ = model_class.from_pretrained(__UpperCamelCase , **__UpperCamelCase )
model.to(args.device )
with open(args.evaluation_set , 'r' ) as eval_file, open(args.predictions_path , 'w' ) as preds_file:
A__ = []
for line in tqdm(__UpperCamelCase ):
questions.append(line.strip() )
if len(__UpperCamelCase ) == args.eval_batch_size:
A__ = evaluate_batch_fn(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
preds_file.write('\n'.join(__UpperCamelCase ) + '\n' )
preds_file.flush()
A__ = []
if len(__UpperCamelCase ) > 0:
A__ = evaluate_batch_fn(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
preds_file.write('\n'.join(__UpperCamelCase ) )
preds_file.flush()
score_fn(__UpperCamelCase , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = get_args()
main(args)
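# Example invocation (illustrative; the script name and file paths are placeholders):
#   python eval_rag.py --model_name_or_path facebook/rag-sequence-nq \
#       --evaluation_set dev.questions --gold_data_path dev.gold_data \
#       --gold_data_mode qa --eval_mode e2e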
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
A__ : Union[str, Any] = ["image_processor", "tokenizer"]
A__ : Optional[Any] = "BridgeTowerImageProcessor"
A__ : List[Any] = ("RobertaTokenizer", "RobertaTokenizerFast")
def __init__( self : List[Any] , _snake_case : Optional[Any] , _snake_case : Optional[int] ):
"""simple docstring"""
super().__init__(_snake_case , _snake_case )
def __call__( self : List[Any] , _snake_case : int , _snake_case : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , _snake_case : bool = True , _snake_case : Union[bool, str, PaddingStrategy] = False , _snake_case : Union[bool, str, TruncationStrategy] = None , _snake_case : Optional[int] = None , _snake_case : int = 0 , _snake_case : Optional[int] = None , _snake_case : Optional[bool] = None , _snake_case : Optional[bool] = None , _snake_case : bool = False , _snake_case : bool = False , _snake_case : bool = False , _snake_case : bool = False , _snake_case : bool = True , _snake_case : Optional[Union[str, TensorType]] = None , **_snake_case : Optional[int] , ):
"""simple docstring"""
A__ = self.tokenizer(
text=_snake_case , add_special_tokens=_snake_case , padding=_snake_case , truncation=_snake_case , max_length=_snake_case , stride=_snake_case , pad_to_multiple_of=_snake_case , return_token_type_ids=_snake_case , return_attention_mask=_snake_case , return_overflowing_tokens=_snake_case , return_special_tokens_mask=_snake_case , return_offsets_mapping=_snake_case , return_length=_snake_case , verbose=_snake_case , return_tensors=_snake_case , **_snake_case , )
# add pixel_values + pixel_mask
A__ = self.image_processor(
_snake_case , return_tensors=_snake_case , do_normalize=_snake_case , do_center_crop=_snake_case , **_snake_case )
encoding.update(_snake_case )
return encoding
def _a ( self : Any , *_snake_case : Tuple , **_snake_case : List[Any] ):
"""simple docstring"""
return self.tokenizer.batch_decode(*_snake_case , **_snake_case )
def _a ( self : Dict , *_snake_case : Dict , **_snake_case : List[str] ):
"""simple docstring"""
return self.tokenizer.decode(*_snake_case , **_snake_case )
@property
def _a ( self : Tuple ):
"""simple docstring"""
A__ = self.tokenizer.model_input_names
A__ = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
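# Minimal usage sketch (illustrative; assumes a processor instance built from the
# image processor and tokenizer classes named in the class attributes above):
#   encoding = processor(images=image, text='a photo of a cat', return_tensors='pt')
# The returned encoding combines tokenizer outputs (input_ids, attention_mask)
# with image-processor outputs (pixel_values, pixel_mask).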
from __future__ import annotations

from typing import Any


def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    """Print every subsequence of ``sequence`` by either skipping or taking
    the element at ``index`` at each level of the recursion."""
    if index == len(sequence):
        print(current_subsequence)
        return
    # branch 1: skip sequence[index]
    create_state_space_tree(sequence, current_subsequence, index + 1)
    # branch 2: take sequence[index]
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()


if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
    generate_all_subsequences(seq)
    seq.clear()
    seq.extend(['A', 'B', 'C'])
    generate_all_subsequences(seq)
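    # Each call above prints all 2**n subsequences of its sequence, since the
    # recursion either skips or takes sequence[index] at every step: the first
    # call prints 16 lists, the second 8.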
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_xlm_roberta''': [
'''XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaConfig''',
'''XLMRobertaOnnxConfig''',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['tokenization_xlm_roberta'] = ['''XLMRobertaTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['tokenization_xlm_roberta_fast'] = ['''XLMRobertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_xlm_roberta'] = [
'''XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaForCausalLM''',
'''XLMRobertaForMaskedLM''',
'''XLMRobertaForMultipleChoice''',
'''XLMRobertaForQuestionAnswering''',
'''XLMRobertaForSequenceClassification''',
'''XLMRobertaForTokenClassification''',
'''XLMRobertaModel''',
'''XLMRobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_tf_xlm_roberta'] = [
'''TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMRobertaForCausalLM''',
'''TFXLMRobertaForMaskedLM''',
'''TFXLMRobertaForMultipleChoice''',
'''TFXLMRobertaForQuestionAnswering''',
'''TFXLMRobertaForSequenceClassification''',
'''TFXLMRobertaForTokenClassification''',
'''TFXLMRobertaModel''',
'''TFXLMRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_flax_xlm_roberta'] = [
'''FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxXLMRobertaForMaskedLM''',
'''FlaxXLMRobertaForCausalLM''',
'''FlaxXLMRobertaForMultipleChoice''',
'''FlaxXLMRobertaForQuestionAnswering''',
'''FlaxXLMRobertaForSequenceClassification''',
'''FlaxXLMRobertaForTokenClassification''',
'''FlaxXLMRobertaModel''',
'''FlaxXLMRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DiTPipeline(DiffusionPipeline):
    r"""
    Pipeline for class-conditional image generation with a diffusion Transformer (DiT).
    """

    def __init__(
        self,
        transformer: Transformer2DModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create an imagenet label -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip().rstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        """Map human-readable ImageNet label strings to their class ids."""
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."
                )
        return [self.labels[l] for l in label]
@torch.no_grad()
    def __call__(
        self,
        class_labels: List[int],
        guidance_scale: float = 4.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        r"""Run the denoising loop for the given ImageNet class ids and decode the result."""
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size),
            generator=generator,
            device=self.device,
            dtype=self.transformer.dtype,
        )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])

            # predict noise model_output
            noise_pred = self.transformer(latent_model_input, timestep=timesteps, class_labels=class_labels_input).sample

            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)

                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)

                noise_pred = torch.cat([eps, rest], dim=1)

            # learned sigma: keep only the epsilon half for the scheduler step
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred

            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample

        samples = (samples / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            samples = self.numpy_to_pil(samples)

        if not return_dict:
            return (samples,)

        return ImagePipelineOutput(images=samples)
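# A hedged usage sketch (an addition; the checkpoint id and label strings are
# assumptions, adjust to whatever DiT checkpoint you actually use):
#
#   import torch
#   from diffusers import DiTPipeline, DPMSolverMultistepScheduler
#
#   pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16)
#   pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
#   class_ids = pipe.get_label_ids(["white shark", "umbrella"])
#   images = pipe(class_labels=class_ids, guidance_scale=4.0, num_inference_steps=25).images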
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path):
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)
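# For example (illustrative values, assuming utils_rag applies the usual
# SQuAD-style normalization): with ground_truths ["Paris", "paris, France"] and
# prediction "Paris", exact_match_score is 1.0 against the first reference and
# 0.0 against the second, so the max over ground truths credits the prediction 1.0.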
def get_scores(args, preds_path, gold_data_path):
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]

    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)

    em = 100.0 * em / total
    f1 = 100.0 * f1 / total

    logger.info(f"F1: {f1:.2f}")
    logger.info(f"EM: {em:.2f}")
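# Note: metric_max_over_ground_truths yields the per-question best score, so em
# and f1 above are averages of per-question maxima, reported as percentages.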
def get_precision_at_k(args, preds_path, gold_data_path):
    k = args.k
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    references = [line.strip() for line in open(gold_data_path, "r").readlines()]

    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k

    em = 100.0 * em / total
    logger.info(f"Precision@{k}: {em: .2f}")
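# For example (illustrative values): with k=3, a hypothesis line "A\tB\tC" and a
# reference line "B\tD", the overlap {B} has size 1, so this sample contributes
# 1/3 to the running precision@k before the final scaling to a percentage.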
def evaluate_batch_retrieval(args, rag_model, questions):
    def strip_title(title):
        if title.startswith('"'):
            title = title[1:]
        if title.endswith('"'):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions,
        return_tensors="pt",
        padding=True,
        truncation=True,
    )["input_ids"].to(args.device)

    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids,
        question_enc_pool_output.cpu().detach().to(torch.float32).numpy(),
        prefix=rag_model.rag.generator.config.prefix,
        n_docs=rag_model.config.n_docs,
        return_tensors="pt",
    )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance))
    return provenance_strings
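# Each returned line is a tab-separated list of retrieved document titles for one
# question, which is the format get_precision_at_k expects on the predictions side.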
def evaluate_batch_e2e(args, rag_model, questions):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True
        )
        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids,
            attention_mask=attention_mask,
            num_beams=args.num_beams,
            min_length=args.min_length,
            max_length=args.max_length,
            early_stopping=False,
            num_return_sequences=1,
            bad_words_ids=[[0, 0]],  # BART likes to repeat BOS tokens; don't allow more than one
        )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)

        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("Q: {} - A: {}".format(q, a))

        return answers
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type", choices=["rag_sequence", "rag_token", "bart"], type=str, help=(
            "RAG model type: rag_sequence, rag_token or bart; if none specified, the type is inferred from the"
            " model_name_or_path"
        ),
    )
    parser.add_argument(
        "--index_name", default=None, choices=["exact", "compressed", "legacy"], type=str, help="RAG model retriever type",
    )
    parser.add_argument(
        "--index_path", default=None, type=str, help="Path to the retrieval index",
    )
    parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs")
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained checkpoints or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--eval_mode", choices=["e2e", "retrieval"], default="e2e", type=str, help=(
            "Evaluation mode: e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ),
    )
    parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation")
    parser.add_argument(
        "--evaluation_set", default=None, type=str, required=True, help="Path to a file containing evaluation samples",
    )
    parser.add_argument(
        "--gold_data_path", default=None, type=str, required=True, help="Path to a tab-separated file with gold samples",
    )
    parser.add_argument(
        "--gold_data_mode", default="qa", type=str, choices=["qa", "ans"], help=(
            "Format of the gold data file. "
            "qa - a single line in the following format: question [tab] answer_list. "
            "ans - a single line of the gold file contains the expected answer string."
        ),
    )
    parser.add_argument(
        "--predictions_path", type=str, default="predictions.txt", help="Name of the predictions file, to be stored in the checkpoints directory",
    )
    parser.add_argument(
        "--eval_all_checkpoints", action="store_true", help="Evaluate all checkpoints starting with the same prefix as model_name and ending with the step number",
    )
    parser.add_argument(
        "--eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.",
    )
    parser.add_argument(
        "--recalculate", help="Recalculate predictions even if the prediction file exists", action="store_true",
    )
    parser.add_argument(
        "--num_beams", default=4, type=int, help="Number of beams to be used when generating answers",
    )
    parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers")
    parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers")
    parser.add_argument(
        "--print_predictions", action="store_true", help="If True, prints predictions while evaluating.",
    )
    parser.add_argument(
        "--print_docs", action="store_true", help="If True, prints docs retrieved while generating.",
    )
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return args
def main(args):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("rag"):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration

    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )

    logger.info("Evaluate the following checkpoints: %s", checkpoints)

    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval

    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue

        logger.info("***** Running evaluation for {} *****".format(checkpoint))
        logger.info("  Batch size = %d", args.eval_batch_size)
        logger.info("  Predictions will be stored under {}".format(args.predictions_path))

        if args.model_type.startswith("rag"):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)

        with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("\n".join(answers) + "\n")
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("\n".join(answers))
                preds_file.flush()

            score_fn(args, args.predictions_path, args.gold_data_path)
if __name__ == "__main__":
    args = get_args()
    main(args)
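# Example invocation (an addition; paths and the checkpoint id are illustrative):
#
#   python eval_rag.py \
#       --model_name_or_path facebook/rag-sequence-nq \
#       --model_type rag_sequence \
#       --evaluation_set path/to/test.source \
#       --gold_data_path path/to/gold_data \
#       --predictions_path path/to/e2e_preds.txt \
#       --eval_mode e2e \
#       --gold_data_mode qa \
#       --n_docs 5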