| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 86–54.5k | int64 0–371 | stringlengths 87–49.2k | int64 0–349 | int64 0–1 |
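Each row below pairs a `code` sample with a `style_context` sample, followed by their two codestyle ids and a binary `label`. Judging only from the rows visible here (306/306 → 1, 350/296 → 0, 62/62 → 1, 357/43 → 0, 281/281 → 1, 367/299 → 0, 346/346 → 1), the label appears to mark whether the two samples share the same codestyle id; this reading is inferred from the data, not a documented schema.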
from collections.abc import Sequence


def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    """Return the maximum subsequence sum (Kadane's algorithm)."""
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")

    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        ans = max(ans, ans + num, num)

    return ans


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
    print(max_subsequence_sum(array))
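A quick sanity check of the function above; the expected values follow from tracing the running maximum (this check is a minimal sketch, not part of the original file):

assert max_subsequence_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6  # best subarray is [4, -1, 2, 1]
assert max_subsequence_sum([5]) == 5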
code_codestyle: 306
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    """A batched, differentiable, standard pinhole camera."""

    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]

    def __post_init__(self):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2

    def resolution(self):
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))

    def fov(self):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))

    def get_image_coords(self) -> torch.Tensor:
        """Per-pixel (column, row) coordinates, stacked into an [H*W, 2] tensor."""
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode="trunc"),
            ],
            axis=1,
        )
        return coords

    @property
    def camera_rays(self):
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))

        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)
        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)
        return rays

    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]

        flat = coords.view(batch_size, -1, 2)

        res = self.resolution()
        fov = self.fov()

        # Map pixel coordinates to [-1, 1], then scale by the tangent of half the field of view.
        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)

        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ],
            dim=2,
        )
        return rays.view(batch_size, *shape, 2, 3)

    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
        """Create a new camera for the resized view, assuming the aspect ratio does not change."""
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin,
            x=self.x,
            y=self.y,
            z=self.z,
            width=width,
            height=height,
            x_fov=self.x_fov,
            y_fov=self.y_fov,
            shape=self.shape,  # required dataclass field; the original snippet omitted it
        )


def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
        x=torch.from_numpy(np.stack(xs, axis=0)).float(),
        y=torch.from_numpy(np.stack(ys, axis=0)).float(),
        z=torch.from_numpy(np.stack(zs, axis=0)).float(),
        width=size,
        height=size,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(xs)),
    )
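A minimal usage sketch for the camera above; the shapes follow from the definitions (20 poses, one batch), and the snippet is illustrative rather than from the original file:

cameras = create_pan_cameras(64)          # 20 poses panning around the origin at radius 4
rays = cameras.camera_rays                # shape [1, 20 * 64 * 64, 2, 3]
origins, directions = rays.unbind(dim=2)  # ray origins and unit direction vectors per pixel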
style_context_codestyle: 306 · label: 1
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
A_ = logging.get_logger(__name__)
A_ = {
'''openai/whisper-base''': '''https://huggingface.co/openai/whisper-base/resolve/main/config.json''',
}
# fmt: off
A_ = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 1_0563, 1_0786,
1_1420, 1_1709, 1_1907, 1_3163, 1_3697, 1_3700, 1_4808, 1_5306, 1_6410, 1_6791,
1_7992, 1_9203, 1_9510, 2_0724, 2_2305, 2_2935, 2_7007, 3_0109, 3_0420, 3_3409,
3_4949, 4_0283, 4_0493, 4_0549, 4_7282, 4_9146, 5_0257, 5_0359, 5_0360, 5_0361
]
A_ = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 1_0428, 1_0929, 1_1938, 1_2033, 1_2331, 1_2562, 1_3793,
1_4157, 1_4635, 1_5265, 1_5618, 1_6553, 1_6604, 1_8362, 1_8956, 2_0075, 2_1675,
2_2520, 2_6130, 2_6161, 2_6435, 2_8279, 2_9464, 3_1650, 3_2302, 3_2470, 3_6865,
4_2863, 4_7425, 4_9870, 5_0254, 5_0258, 5_0360, 5_0361, 5_0362
]
class __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
snake_case_ = 'whisper'
snake_case_ = ['past_key_values']
snake_case_ = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self : Union[str, Any] , snake_case : Dict=5_1865 , snake_case : int=80 , snake_case : int=6 , snake_case : Tuple=4 , snake_case : Any=6 , snake_case : str=4 , snake_case : Dict=1536 , snake_case : List[Any]=1536 , snake_case : List[Any]=0.0 , snake_case : int=0.0 , snake_case : Any=5_0257 , snake_case : List[str]=True , snake_case : List[Any]=True , snake_case : List[str]="gelu" , snake_case : Union[str, Any]=256 , snake_case : List[str]=0.0 , snake_case : str=0.0 , snake_case : List[Any]=0.0 , snake_case : int=0.02 , snake_case : List[Any]=False , snake_case : List[str]=1500 , snake_case : Dict=448 , snake_case : Tuple=5_0256 , snake_case : List[str]=5_0256 , snake_case : Any=5_0256 , snake_case : Optional[int]=None , snake_case : Any=[220, 5_0256] , snake_case : Dict=False , snake_case : Dict=256 , snake_case : Dict=False , snake_case : Dict=0.05 , snake_case : Optional[int]=10 , snake_case : str=2 , snake_case : int=0.0 , snake_case : int=10 , snake_case : int=0 , snake_case : int=7 , **snake_case : Optional[Any] , ):
'''simple docstring'''
A__ : Dict = vocab_size
A__ : List[str] = num_mel_bins
A__ : str = d_model
A__ : Dict = encoder_layers
A__ : Tuple = encoder_attention_heads
A__ : Dict = decoder_layers
A__ : int = decoder_attention_heads
A__ : List[Any] = decoder_ffn_dim
A__ : List[Any] = encoder_ffn_dim
A__ : Optional[Any] = dropout
A__ : Tuple = attention_dropout
A__ : str = activation_dropout
A__ : Tuple = activation_function
A__ : List[Any] = init_std
A__ : Dict = encoder_layerdrop
A__ : Tuple = decoder_layerdrop
A__ : Dict = use_cache
A__ : List[Any] = encoder_layers
A__ : Union[str, Any] = scale_embedding # scale factor will be sqrt(d_model) if True
A__ : Any = max_source_positions
A__ : int = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
A__ : Any = classifier_proj_size
A__ : str = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
A__ : Optional[Any] = apply_spec_augment
A__ : List[str] = mask_time_prob
A__ : List[str] = mask_time_length
A__ : Union[str, Any] = mask_time_min_masks
A__ : Optional[int] = mask_feature_prob
A__ : Union[str, Any] = mask_feature_length
A__ : Union[str, Any] = mask_feature_min_masks
A__ : Dict = median_filter_width
super().__init__(
pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , is_encoder_decoder=__a , decoder_start_token_id=__a , suppress_tokens=__a , begin_suppress_tokens=__a , **__a , )
class __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
@property
def _UpperCamelCase ( self : Optional[Any] ):
'''simple docstring'''
A__ : Any = OrderedDict(
[
("""input_features""", {0: """batch""", 1: """feature_size""", 2: """encoder_sequence"""}),
] )
if self.use_past:
A__ : List[str] = {0: 'batch'}
else:
A__ : Dict = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(__a , direction="""inputs""" )
return common_inputs
def _UpperCamelCase ( self : Optional[int] , snake_case : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , snake_case : int = -1 , snake_case : int = -1 , snake_case : bool = False , snake_case : Optional["TensorType"] = None , snake_case : int = 2_2050 , snake_case : float = 5.0 , snake_case : int = 220 , ):
'''simple docstring'''
A__ : Union[str, Any] = OrderedDict()
A__ : Any = OnnxConfig.generate_dummy_inputs(
self , preprocessor=preprocessor.feature_extractor , batch_size=__a , framework=__a , sampling_rate=__a , time_duration=__a , frequency=__a , )
A__ : Union[str, Any] = encoder_inputs['input_features'].shape[2]
A__ : Optional[Any] = encoder_sequence_length // 2 if self.use_past else seq_length
A__ : List[Any] = super().generate_dummy_inputs(
preprocessor.tokenizer , __a , __a , __a , __a )
A__ : Any = encoder_inputs.pop("""input_features""" )
A__ : Dict = decoder_inputs.pop("""decoder_input_ids""" )
if "past_key_values" in decoder_inputs:
A__ : Union[str, Any] = decoder_inputs.pop("""past_key_values""" )
return dummy_inputs
@property
def _UpperCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
return 1e-3
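A brief instantiation sketch for the config above; the overridden sizes are arbitrary, and the attribute routing follows the `attribute_map` declared in the class:

config = WhisperConfig(d_model=128, encoder_layers=2, decoder_layers=2)
print(config.hidden_size)          # 128 — routed through attribute_map to d_model
print(config.num_attention_heads)  # 4   — routed to encoder_attention_heads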
code_codestyle: 350
"""simple docstring"""
from __future__ import annotations
class __SCREAMING_SNAKE_CASE :
def __init__( self : Dict , snake_case : int ):
'''simple docstring'''
A__ : List[Any] = order
# a_{0} ... a_{k}
A__ : List[Any] = [1.0] + [0.0] * order
# b_{0} ... b_{k}
A__ : str = [1.0] + [0.0] * order
# x[n-1] ... x[n-k]
A__ : Union[str, Any] = [0.0] * self.order
# y[n-1] ... y[n-k]
A__ : List[str] = [0.0] * self.order
def _UpperCamelCase ( self : Optional[int] , snake_case : list[float] , snake_case : list[float] ):
'''simple docstring'''
if len(snake_case ) < self.order:
A__ : Any = [1.0, *a_coeffs]
if len(snake_case ) != self.order + 1:
A__ : str = (
F'Expected a_coeffs to have {self.order + 1} elements '
F'for {self.order}-order filter, got {len(snake_case )}'
)
raise ValueError(snake_case )
if len(snake_case ) != self.order + 1:
A__ : Union[str, Any] = (
F'Expected b_coeffs to have {self.order + 1} elements '
F'for {self.order}-order filter, got {len(snake_case )}'
)
raise ValueError(snake_case )
A__ : Dict = a_coeffs
A__ : Any = b_coeffs
def _UpperCamelCase ( self : List[str] , snake_case : float ):
'''simple docstring'''
A__ : str = 0.0
# Start at index 1 and do index 0 at the end.
for i in range(1 , self.order + 1 ):
result += (
self.b_coeffs[i] * self.input_history[i - 1]
- self.a_coeffs[i] * self.output_history[i - 1]
)
A__ : Dict = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
A__ : Tuple = self.input_history[:-1]
A__ : int = self.output_history[:-1]
A__ : Dict = sample
A__ : Tuple = result
return result
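A minimal sketch applying the filter above; the coefficients are an arbitrary stable first-order example, not derived from a filter-design formula:

flt = IIRFilter(order=1)
flt.set_coefficients(a_coeffs=[1.0, -0.9], b_coeffs=[0.05, 0.05])
smoothed = [flt.process(s) for s in [1.0, 1.0, 1.0, 0.0, 0.0]]  # step input is smoothed sample by sample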
style_context_codestyle: 296 · label: 0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_glpn"] = ["GLPNFeatureExtractor"]
    _import_structure["image_processing_glpn"] = ["GLPNImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_glpn"] = [
        "GLPN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GLPNForDepthEstimation",
        "GLPNLayer",
        "GLPNModel",
        "GLPNPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_glpn import GLPNFeatureExtractor
        from .image_processing_glpn import GLPNImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_glpn import (
            GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
            GLPNForDepthEstimation,
            GLPNLayer,
            GLPNModel,
            GLPNPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
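The `_LazyModule` registration above defers heavy imports until an attribute is first accessed; a minimal sketch of the effect (module path as in `transformers`, assuming torch is installed):

import transformers.models.glpn as glpn   # cheap: the torch-backed submodules are not imported yet
model_cls = glpn.GLPNForDepthEstimation  # first attribute access triggers the real import of modeling_glpn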
code_codestyle: 62
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}


class BartphoTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        monolingual_vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self) -> Any:
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self) -> int:
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{str(token)} \n")

        return out_vocab_file, out_monolingual_vocab_file
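Loading this tokenizer from the Hub uses the standard `from_pretrained` path; a brief sketch, assuming the `sentencepiece` dependency is installed (the checkpoint name comes from the pretrained map above):

tokenizer = BartphoTokenizer.from_pretrained("vinai/bartpho-syllable")
ids = tokenizer("Chúng tôi là những nghiên cứu viên.")["input_ids"]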
style_context_codestyle: 62 · label: 1
from __future__ import annotations

from scipy.special import comb  # type: ignore


class BezierCurve:
    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
            )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."

        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]

        plt.plot(to_plot_x, to_plot_y, color="blue", label="Curve of Degree " + str(self.degree))
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    BezierCurve([(1, 2), (3, 5)]).plot_curve()  # degree 1
    BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve()  # degree 2
    BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve()  # degree 3
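The basis computed in `basis_function` above is the Bernstein polynomial; written out (the standard definition, consistent with the code, not taken from the original file):

b_{i,n}(t) = \binom{n}{i} (1 - t)^{\,n-i}\, t^{i}, \qquad
B(t) = \sum_{i=0}^{n} b_{i,n}(t)\, P_i, \quad t \in [0, 1]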
code_codestyle: 357
def multiplicative_persistence(num: int) -> int:
    """Return how many times the digits must be multiplied together to reach one digit."""
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]

        num_string = str(total)
        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    """Return how many times the digits must be summed to reach one digit."""
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]

        num_string = str(total)
        steps += 1
    return steps


if __name__ == "__main__":
    import doctest

    doctest.testmod()
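A quick trace of both functions (arithmetic checked by hand, not from the original file): 39 → 27 → 14 → 4 multiplicatively, and 199 → 19 → 10 → 1 additively, so both take three steps.

assert multiplicative_persistence(39) == 3  # 3*9=27, 2*7=14, 1*4=4
assert additive_persistence(199) == 3       # 1+9+9=19, 1+9=10, 1+0=1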
style_context_codestyle: 43 · label: 0
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional

from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand


try:
    from fastapi import Body, FastAPI, HTTPException
    from fastapi.routing import APIRoute
    from pydantic import BaseModel
    from starlette.responses import JSONResponse
    from uvicorn import run

    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object

    def Body(*x, **y):
        pass

    _serve_dependencies_installed = False

logger = logging.get_logger("transformers-cli/serving")


def serve_command_factory(args: Namespace):
    """Factory function used to instantiate the serving server from provided command line arguments."""
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    return ServeCommand(nlp, args.host, args.port, args.workers)


class ServeModelInfoResult(BaseModel):
    """Expose model information."""

    infos: dict


class ServeTokenizeResult(BaseModel):
    """Tokenize result model."""

    tokens: List[str]
    tokens_ids: Optional[List[int]]


class ServeDeTokenizeResult(BaseModel):
    """DeTokenize result model."""

    text: str


class ServeForwardResult(BaseModel):
    """Forward result model."""

    output: Any


class ServeCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        serve_parser = parser.add_parser(
            "serve", help="CLI tool to run inference requests through REST and GraphQL endpoints."
        )
        serve_parser.add_argument(
            "--task", type=str, choices=get_supported_tasks(), help="The task to run the pipeline on"
        )
        serve_parser.add_argument("--host", type=str, default="localhost", help="Interface the server will listen on.")
        serve_parser.add_argument("--port", type=int, default=8888, help="Port the serving will listen to.")
        serve_parser.add_argument("--workers", type=int, default=1, help="Number of http workers")
        serve_parser.add_argument("--model", type=str, help="Model's name or path to stored model.")
        serve_parser.add_argument("--config", type=str, help="Model's config name or path to stored model.")
        serve_parser.add_argument("--tokenizer", type=str, help="Tokenizer name to use.")
        serve_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        serve_parser.set_defaults(func=serve_command_factory)

    def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
        self._pipeline = pipeline

        self.host = host
        self.port = port
        self.workers = workers

        if not _serve_dependencies_installed:
            raise RuntimeError(
                "Using serve command requires FastAPI and uvicorn. "
                'Please install transformers with [serving]: pip install "transformers[serving]". '
                "Or install FastAPI and uvicorn separately."
            )
        else:
            logger.info(f"Serving model over {host}:{port}")
            self._app = FastAPI(
                routes=[
                    APIRoute(
                        "/",
                        self.model_info,
                        response_model=ServeModelInfoResult,
                        response_class=JSONResponse,
                        methods=["GET"],
                    ),
                    APIRoute(
                        "/tokenize",
                        self.tokenize,
                        response_model=ServeTokenizeResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                    APIRoute(
                        "/detokenize",
                        self.detokenize,
                        response_model=ServeDeTokenizeResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                    APIRoute(
                        "/forward",
                        self.forward,
                        response_model=ServeForwardResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                ],
                timeout=600,
            )

    def run(self):
        run(self._app, host=self.host, port=self.port, workers=self.workers)

    def model_info(self):
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config))

    def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input)

            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
                return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
            else:
                return ServeTokenizeResult(tokens=tokens_txt)

        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    def detokenize(
        self,
        tokens_ids: List[int] = Body(None, embed=True),
        skip_special_tokens: bool = Body(False, embed=True),
        cleanup_tokenization_spaces: bool = Body(True, embed=True),
    ):
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
            return ServeDeTokenizeResult(model="", text=decoded_str)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    async def forward(self, inputs=Body(None, embed=True)):
        # Check we don't have empty string
        if len(inputs) == 0:
            return ServeForwardResult(output=[], attention=[])

        try:
            # Forward through the model
            output = self._pipeline(inputs)
            return ServeForwardResult(output=output)
        except Exception as e:
            raise HTTPException(500, {"error": str(e)})
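A usage sketch against the routes registered above; the task and model names are arbitrary examples, and the server is assumed to have been started first from a shell:

# shell: transformers-cli serve --task sentiment-analysis --model distilbert-base-uncased-finetuned-sst-2-english
import requests

resp = requests.post(
    "http://localhost:8888/tokenize",
    json={"text_input": "Hello world", "return_ids": True},  # Body(..., embed=True) expects embedded JSON fields
)
print(resp.json())  # -> {"tokens": [...], "tokens_ids": [...]}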
code_codestyle: 281
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        attention_window=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window

        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2

        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            attention_window=self.attention_window,
            **self.config_updates,
        )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]],
            axis=-1,
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)


def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }


@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
        num_global_attn_indices = 2
        inputs_dict["global_attention_mask"] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices,
            1,
            inputs_dict["global_attention_mask"],
        )

        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)

    @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing.")
    def test_saved_model_creation(self):
        pass

    def test_generate_with_headmasking(self):
        # TODO: Head-masking not yet implement
        pass


def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


TOLERANCE = 1e-4


@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)

    def test_inference_with_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
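The `encoder_seq_length` arithmetic in the tester above pads `seq_length` up to the next multiple of `attention_window`; a quick check with the tester defaults (seq_length=7, attention_window=4):

seq_length, attention_window = 7, 4
encoder_seq_length = seq_length + (attention_window - seq_length % attention_window) % attention_window
assert encoder_seq_length == 8  # 7 is padded up to the next multiple of 4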
style_context_codestyle: 281 · label: 1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_blenderbot": [
        "BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotConfig",
        "BlenderbotOnnxConfig",
    ],
    "tokenization_blenderbot": ["BlenderbotTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blenderbot"] = [
        "BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotForCausalLM",
        "BlenderbotForConditionalGeneration",
        "BlenderbotModel",
        "BlenderbotPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
        "TFBlenderbotForConditionalGeneration",
        "TFBlenderbotModel",
        "TFBlenderbotPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
        "FlaxBlenderbotForConditionalGeneration",
        "FlaxBlenderbotModel",
        "FlaxBlenderbotPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_blenderbot import (
        BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotConfig,
        BlenderbotOnnxConfig,
    )
    from .tokenization_blenderbot import BlenderbotTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_fast import BlenderbotTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot import (
            BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotForCausalLM,
            BlenderbotForConditionalGeneration,
            BlenderbotModel,
            BlenderbotPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot import (
            TFBlenderbotForConditionalGeneration,
            TFBlenderbotModel,
            TFBlenderbotPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot import (
            FlaxBlenderbotForConditionalGeneration,
            FlaxBlenderbotModel,
            FlaxBlenderbotPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
code_codestyle: 367
# Logistic Regression from scratch

# In[62]:

# In[63]:

# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets


def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


# here alpha is the learning rate, x is the feature matrix and y is the target matrix
def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta


# In[68]:

if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta)
        )  # predicting the value of probability from the logistic regression algorithm

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
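The update inside `logistic_reg` above is plain batch gradient descent on the cross-entropy loss; in symbols (the standard derivation, matching the code):

h = \sigma(X\theta), \qquad
\nabla_\theta J = \frac{1}{m} X^{\top}(h - y), \qquad
\theta \leftarrow \theta - \alpha\, \nabla_\theta J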
style_context_codestyle: 299 · label: 0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
    "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}


class MarkupLMConfig(PretrainedConfig):
    model_type = "markuplm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1024,
        tag_pad_id=216,
        subs_pad_id=1001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
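A short sketch exercising the defaults declared above; overriding any of them is a plain keyword argument, as with every `PretrainedConfig` subclass:

config = MarkupLMConfig()
assert config.xpath_unit_hidden_size == 32
config_small = MarkupLMConfig(hidden_size=256, num_hidden_layers=4)  # override defaults as needed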
code_codestyle: 346
import argparse

import torch

from transformers import BertForMaskedLM


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="bert", choices=["bert"])
    parser.add_argument("--model_name", default="bert-base-uncased", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_bert-base-uncased_0247911.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = "bert"
    else:
        raise ValueError('args.model_type should be "bert".')

    state_dict = model.state_dict()
    compressed_sd = {}

    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"distilbert.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"]
    for w in ["weight", "bias"]:
        compressed_sd[f"distilbert.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]

    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1

    compressed_sd["vocab_projector.weight"] = state_dict["cls.predictions.decoder.weight"]
    compressed_sd["vocab_projector.bias"] = state_dict["cls.predictions.bias"]
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"vocab_transform.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"]
            compressed_sd[f"vocab_layer_norm.{w}"] = state_dict[f"cls.predictions.transform.LayerNorm.{w}"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
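The argparse block above defines the full CLI; a minimal invocation sketch (the script filename is whatever this file is saved as, shown here as a comment):

# python extract.py --model_type bert --model_name bert-base-uncased \
#     --dump_checkpoint serialization_dir/distilbert_init.pth --vocab_transform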
style_context_codestyle: 346 · label: 1
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def __lowerCAmelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )-> str:
"""simple docstring"""
with open(SCREAMING_SNAKE_CASE ) as metadata_file:
snake_case_ = json.load(SCREAMING_SNAKE_CASE )
snake_case_ = LukeConfig(use_entity_aware_attention=SCREAMING_SNAKE_CASE , **metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
snake_case_ = torch.load(SCREAMING_SNAKE_CASE , map_location='''cpu''' )['''module''']
# Load the entity vocab file
snake_case_ = load_original_entity_vocab(SCREAMING_SNAKE_CASE )
# add an entry for [MASK2]
snake_case_ = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
snake_case_ = XLMRobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
snake_case_ = AddedToken('''<ent>''' , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE )
snake_case_ = AddedToken('''<ent2>''' , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE )
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f'''Saving tokenizer to {pytorch_dump_folder_path}''' )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE )
with open(os.path.join(SCREAMING_SNAKE_CASE , '''tokenizer_config.json''' ) , '''r''' ) as f:
snake_case_ = json.load(SCREAMING_SNAKE_CASE )
snake_case_ = '''MLukeTokenizer'''
with open(os.path.join(SCREAMING_SNAKE_CASE , '''tokenizer_config.json''' ) , '''w''' ) as f:
json.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
with open(os.path.join(SCREAMING_SNAKE_CASE , MLukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f:
json.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
snake_case_ = MLukeTokenizer.from_pretrained(SCREAMING_SNAKE_CASE )
# Initialize the embeddings of the special tokens
snake_case_ = tokenizer.convert_tokens_to_ids(['''@'''] )[0]
snake_case_ = tokenizer.convert_tokens_to_ids(['''#'''] )[0]
snake_case_ = state_dict['''embeddings.word_embeddings.weight''']
snake_case_ = word_emb[ent_init_index].unsqueeze(0 )
snake_case_ = word_emb[enta_init_index].unsqueeze(0 )
snake_case_ = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
snake_case_ = state_dict[bias_name]
snake_case_ = decoder_bias[ent_init_index].unsqueeze(0 )
snake_case_ = decoder_bias[enta_init_index].unsqueeze(0 )
snake_case_ = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
snake_case_ = f'''encoder.layer.{layer_index}.attention.self.'''
snake_case_ = state_dict[prefix + matrix_name]
snake_case_ = state_dict[prefix + matrix_name]
snake_case_ = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
snake_case_ = state_dict['''entity_embeddings.entity_embeddings.weight''']
snake_case_ = entity_emb[entity_vocab['''[MASK]''']].unsqueeze(0 )
snake_case_ = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
snake_case_ = state_dict['''entity_predictions.bias''']
snake_case_ = entity_prediction_bias[entity_vocab['''[MASK]''']].unsqueeze(0 )
snake_case_ = torch.cat([entity_prediction_bias, entity_mask_bias] )
snake_case_ = LukeForMaskedLM(config=SCREAMING_SNAKE_CASE ).eval()
state_dict.pop('''entity_predictions.decoder.weight''' )
state_dict.pop('''lm_head.decoder.weight''' )
state_dict.pop('''lm_head.decoder.bias''' )
snake_case_ = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith('''lm_head''' ) or key.startswith('''entity_predictions''' )):
snake_case_ = state_dict[key]
else:
snake_case_ = state_dict[key]
snake_case_ , snake_case_ = model.load_state_dict(SCREAMING_SNAKE_CASE , strict=SCREAMING_SNAKE_CASE )
if set(SCREAMING_SNAKE_CASE ) != {"luke.embeddings.position_ids"}:
raise ValueError(f'''Unexpected unexpected_keys: {unexpected_keys}''' )
if set(SCREAMING_SNAKE_CASE ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(f'''Unexpected missing_keys: {missing_keys}''' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
snake_case_ = MLukeTokenizer.from_pretrained(SCREAMING_SNAKE_CASE , task='''entity_classification''' )
snake_case_ = '''ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'''
snake_case_ = (0, 9)
snake_case_ = tokenizer(SCREAMING_SNAKE_CASE , entity_spans=[span] , return_tensors='''pt''' )
snake_case_ = model(**SCREAMING_SNAKE_CASE )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
snake_case_ = torch.Size((1, 33, 768) )
snake_case_ = torch.tensor([[0.0_8_9_2, 0.0_5_9_6, -0.2_8_1_9], [0.0_1_3_4, 0.1_1_9_9, 0.0_5_7_3], [-0.0_1_6_9, 0.0_9_2_7, 0.0_6_4_4]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
snake_case_ = torch.Size((1, 1, 768) )
snake_case_ = torch.tensor([[-0.1_4_8_2, 0.0_6_0_9, 0.0_3_2_2]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
f''' {expected_shape}''' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 ):
raise ValueError
# Verify masked word/entity prediction
snake_case_ = MLukeTokenizer.from_pretrained(SCREAMING_SNAKE_CASE )
snake_case_ = '''Tokyo is the capital of <mask>.'''
snake_case_ = (24, 30)
snake_case_ = tokenizer(SCREAMING_SNAKE_CASE , entity_spans=[span] , return_tensors='''pt''' )
snake_case_ = model(**SCREAMING_SNAKE_CASE )
snake_case_ = encoding['''input_ids'''][0].tolist()
snake_case_ = input_ids.index(tokenizer.convert_tokens_to_ids('''<mask>''' ) )
snake_case_ = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(SCREAMING_SNAKE_CASE )
snake_case_ = outputs.entity_logits[0][0].argmax().item()
snake_case_ = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith('''en:''' )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(SCREAMING_SNAKE_CASE ) )
model.save_pretrained(SCREAMING_SNAKE_CASE )
def __lowerCAmelCase (SCREAMING_SNAKE_CASE )-> Union[str, Any]:
"""simple docstring"""
snake_case_ = ['''[MASK]''', '''[PAD]''', '''[UNK]''']
snake_case_ = [json.loads(SCREAMING_SNAKE_CASE ) for line in open(SCREAMING_SNAKE_CASE )]
snake_case_ = {}
for entry in data:
snake_case_ = entry['''id''']
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
snake_case_ = entity_id
break
snake_case_ = f'''{language}:{entity_name}'''
snake_case_ = entity_id
return new_mapping
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Path to a pytorch_model.bin file.""")
parser.add_argument(
"""--metadata_path""", default=None, type=str, help="""Path to a metadata.json file, defining the configuration."""
)
parser.add_argument(
"""--entity_vocab_path""",
default=None,
type=str,
help="""Path to an entity_vocab.tsv file, containing the entity vocabulary.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to where to dump the output PyTorch model."""
)
parser.add_argument(
"""--model_size""", default="""base""", type=str, choices=["""base""", """large"""], help="""Size of the model to be converted."""
)
UpperCAmelCase = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
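# Supplementary sketch (not part of the original script): the [MASK2] trick
# above appends one row to the entity embedding matrix, initialized by copying
# the [MASK] row. The names below ("entity_emb", "mask_id") are illustrative.
import torch

entity_emb = torch.randn(5, 8)  # toy entity-embedding matrix: 5 entities, dim 8
mask_id = 2  # hypothetical index of [MASK] in the entity vocab
mask_row = entity_emb[mask_id].unsqueeze(0)  # shape (1, 8)
extended = torch.cat([entity_emb, mask_row])  # row 5 now acts as [MASK2]
assert extended.shape == (6, 8)
assert torch.equal(extended[5], entity_emb[mask_id])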
| 356
|
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class lowerCAmelCase_ ( lowerCamelCase__ ):
'''simple docstring'''
__snake_case = None
__snake_case = None
@property
def UpperCamelCase__ ( self ):
return self.feat_extract_tester.prepare_feat_extract_dict()
def UpperCamelCase__ ( self ):
snake_case_ = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(_UpperCAmelCase , '''feature_size''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''sampling_rate''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''padding_value''' ) )
def UpperCamelCase__ ( self ):
snake_case_ = self.feat_extract_tester.prepare_inputs_for_common()
snake_case_ = self.feature_extraction_class(**self.feat_extract_dict )
snake_case_ = feat_extract.model_input_names[0]
snake_case_ = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(_UpperCAmelCase ) == len(_UpperCAmelCase ) for x, y in zip(_UpperCAmelCase , processed_features[input_name] ) ) )
snake_case_ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_UpperCAmelCase )
snake_case_ = BatchFeature({input_name: speech_inputs} , tensor_type='''np''' )
snake_case_ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
snake_case_ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
def UpperCamelCase__ ( self ):
snake_case_ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_UpperCAmelCase )
snake_case_ = self.feature_extraction_class(**self.feat_extract_dict )
snake_case_ = feat_extract.model_input_names[0]
snake_case_ = BatchFeature({input_name: speech_inputs} , tensor_type='''pt''' )
snake_case_ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
snake_case_ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
def UpperCamelCase__ ( self ):
snake_case_ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_UpperCAmelCase )
snake_case_ = self.feature_extraction_class(**self.feat_extract_dict )
snake_case_ = feat_extract.model_input_names[0]
snake_case_ = BatchFeature({input_name: speech_inputs} , tensor_type='''tf''' )
snake_case_ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
snake_case_ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
def UpperCamelCase__ ( self , _UpperCAmelCase=False ):
def _inputs_have_equal_length(_UpperCAmelCase ):
snake_case_ = len(input[0] )
for input_slice in input[1:]:
if len(_UpperCAmelCase ) != length:
return False
return True
def _inputs_are_equal(_UpperCAmelCase , _UpperCAmelCase ):
if len(_UpperCAmelCase ) != len(_UpperCAmelCase ):
return False
for input_slice_a, input_slice_a in zip(_UpperCAmelCase , _UpperCAmelCase ):
if not np.allclose(np.asarray(_UpperCAmelCase ) , np.asarray(_UpperCAmelCase ) , atol=1E-3 ):
return False
return True
snake_case_ = self.feature_extraction_class(**self.feat_extract_dict )
snake_case_ = self.feat_extract_tester.prepare_inputs_for_common(numpify=_UpperCAmelCase )
snake_case_ = feat_extract.model_input_names[0]
snake_case_ = BatchFeature({input_name: speech_inputs} )
snake_case_ = self.feat_extract_tester.seq_length_diff
snake_case_ = self.feat_extract_tester.max_seq_length + pad_diff
snake_case_ = self.feat_extract_tester.min_seq_length
snake_case_ = self.feat_extract_tester.batch_size
snake_case_ = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
snake_case_ = feat_extract.pad(_UpperCAmelCase , padding=_UpperCAmelCase )
snake_case_ = input_a[input_name]
snake_case_ = feat_extract.pad(_UpperCAmelCase , padding='''longest''' )
snake_case_ = input_a[input_name]
snake_case_ = feat_extract.pad(_UpperCAmelCase , padding='''max_length''' , max_length=len(speech_inputs[-1] ) )
snake_case_ = input_a[input_name]
snake_case_ = feat_extract.pad(_UpperCAmelCase , padding='''longest''' , return_tensors='''np''' )
snake_case_ = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(_UpperCAmelCase ):
feat_extract.pad(_UpperCAmelCase , padding='''max_length''' )[input_name]
snake_case_ = feat_extract.pad(
_UpperCAmelCase , padding='''max_length''' , max_length=_UpperCAmelCase , return_tensors='''np''' )
snake_case_ = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(_UpperCAmelCase ) )
self.assertTrue(_inputs_have_equal_length(_UpperCAmelCase ) )
self.assertTrue(_inputs_have_equal_length(_UpperCAmelCase ) )
self.assertTrue(_inputs_are_equal(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
snake_case_ = feat_extract.pad(_UpperCAmelCase , pad_to_multiple_of=10 )
snake_case_ = input_a[input_name]
snake_case_ = feat_extract.pad(_UpperCAmelCase , padding='''longest''' , pad_to_multiple_of=10 )
snake_case_ = input_a[input_name]
snake_case_ = feat_extract.pad(
_UpperCAmelCase , padding='''max_length''' , pad_to_multiple_of=10 , max_length=_UpperCAmelCase )
snake_case_ = input_a[input_name]
snake_case_ = feat_extract.pad(
_UpperCAmelCase , padding='''max_length''' , pad_to_multiple_of=10 , max_length=_UpperCAmelCase , return_tensors='''np''' , )
snake_case_ = input_a[input_name]
self.assertTrue(all(len(_UpperCAmelCase ) % 10 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(_UpperCAmelCase , _UpperCAmelCase ) )
snake_case_ = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(_UpperCAmelCase ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
snake_case_ = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1E-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1E-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1E-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1E-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1E-3 )
def UpperCamelCase__ ( self , _UpperCAmelCase=False ):
def _inputs_have_equal_length(_UpperCAmelCase ):
snake_case_ = len(input[0] )
for input_slice in input[1:]:
if len(_UpperCAmelCase ) != length:
return False
return True
def _inputs_are_equal(_UpperCAmelCase , _UpperCAmelCase ):
if len(_UpperCAmelCase ) != len(_UpperCAmelCase ):
return False
for input_slice_a, input_slice_a in zip(_UpperCAmelCase , _UpperCAmelCase ):
if not np.allclose(np.asarray(_UpperCAmelCase ) , np.asarray(_UpperCAmelCase ) , atol=1E-3 ):
return False
return True
snake_case_ = self.feature_extraction_class(**self.feat_extract_dict )
snake_case_ = self.feat_extract_tester.prepare_inputs_for_common(numpify=_UpperCAmelCase )
snake_case_ = feat_extract.model_input_names[0]
snake_case_ = BatchFeature({input_name: speech_inputs} )
# truncate to smallest
snake_case_ = feat_extract.pad(
_UpperCAmelCase , padding='''max_length''' , max_length=len(speech_inputs[0] ) , truncation=_UpperCAmelCase )
snake_case_ = input_a[input_name]
snake_case_ = feat_extract.pad(_UpperCAmelCase , padding='''max_length''' , max_length=len(speech_inputs[0] ) )
snake_case_ = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(_UpperCAmelCase ) )
self.assertFalse(_inputs_have_equal_length(_UpperCAmelCase ) )
# truncate to smallest with np
snake_case_ = feat_extract.pad(
_UpperCAmelCase , padding='''max_length''' , max_length=len(speech_inputs[0] ) , return_tensors='''np''' , truncation=_UpperCAmelCase , )
snake_case_ = input_a[input_name]
snake_case_ = feat_extract.pad(
_UpperCAmelCase , padding='''max_length''' , max_length=len(speech_inputs[0] ) , return_tensors='''np''' )
snake_case_ = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(_UpperCAmelCase ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(_UpperCAmelCase ) )
# truncate to middle
snake_case_ = feat_extract.pad(
_UpperCAmelCase , padding='''max_length''' , max_length=len(speech_inputs[1] ) , truncation=_UpperCAmelCase , return_tensors='''np''' , )
snake_case_ = input_a[input_name]
snake_case_ = feat_extract.pad(
_UpperCAmelCase , padding='''max_length''' , max_length=len(speech_inputs[1] ) , truncation=_UpperCAmelCase )
snake_case_ = input_a[input_name]
snake_case_ = feat_extract.pad(
_UpperCAmelCase , padding='''max_length''' , max_length=len(speech_inputs[1] ) , return_tensors='''np''' )
snake_case_ = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(_UpperCAmelCase ) )
self.assertTrue(_inputs_have_equal_length(_UpperCAmelCase ) )
self.assertTrue(_inputs_are_equal(_UpperCAmelCase , _UpperCAmelCase ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(_UpperCAmelCase ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_UpperCAmelCase ):
feat_extract.pad(_UpperCAmelCase , truncation=_UpperCAmelCase )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_UpperCAmelCase ):
feat_extract.pad(_UpperCAmelCase , padding='''longest''' , truncation=_UpperCAmelCase )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_UpperCAmelCase ):
feat_extract.pad(_UpperCAmelCase , padding='''longest''' , truncation=_UpperCAmelCase )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(_UpperCAmelCase ):
feat_extract.pad(_UpperCAmelCase , padding='''max_length''' , truncation=_UpperCAmelCase )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
snake_case_ = 12
snake_case_ = feat_extract.pad(
_UpperCAmelCase , padding='''max_length''' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=_UpperCAmelCase , truncation=_UpperCAmelCase , )
snake_case_ = input_a[input_name]
snake_case_ = feat_extract.pad(
_UpperCAmelCase , padding='''max_length''' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=_UpperCAmelCase , )
snake_case_ = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
snake_case_ = len(speech_inputs[0] )
if expected_length % pad_to_multiple_of != 0:
snake_case_ = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0] ) == expected_length )
self.assertTrue(_inputs_have_equal_length(_UpperCAmelCase ) )
self.assertFalse(_inputs_have_equal_length(_UpperCAmelCase ) )
def UpperCamelCase__ ( self ):
self._check_padding(numpify=_UpperCAmelCase )
def UpperCamelCase__ ( self ):
self._check_padding(numpify=_UpperCAmelCase )
def UpperCamelCase__ ( self ):
self._check_truncation(numpify=_UpperCAmelCase )
def UpperCamelCase__ ( self ):
self._check_truncation(numpify=_UpperCAmelCase )
@require_torch
def UpperCamelCase__ ( self ):
snake_case_ = self.feature_extraction_class(**self.feat_extract_dict )
snake_case_ = self.feat_extract_tester.prepare_inputs_for_common()
snake_case_ = feat_extract.model_input_names[0]
snake_case_ = BatchFeature({input_name: speech_inputs} )
snake_case_ = feat_extract.pad(_UpperCAmelCase , padding='''longest''' , return_tensors='''np''' )[input_name]
snake_case_ = feat_extract.pad(_UpperCAmelCase , padding='''longest''' , return_tensors='''pt''' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 )
@require_tf
def UpperCamelCase__ ( self ):
snake_case_ = self.feature_extraction_class(**self.feat_extract_dict )
snake_case_ = self.feat_extract_tester.prepare_inputs_for_common()
snake_case_ = feat_extract.model_input_names[0]
snake_case_ = BatchFeature({input_name: speech_inputs} )
snake_case_ = feat_extract.pad(_UpperCAmelCase , padding='''longest''' , return_tensors='''np''' )[input_name]
snake_case_ = feat_extract.pad(_UpperCAmelCase , padding='''longest''' , return_tensors='''tf''' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1E-2 )
def UpperCamelCase__ ( self ):
snake_case_ = self.feat_extract_dict
snake_case_ = True
snake_case_ = self.feature_extraction_class(**_UpperCAmelCase )
snake_case_ = self.feat_extract_tester.prepare_inputs_for_common()
snake_case_ = [len(_UpperCAmelCase ) for x in speech_inputs]
snake_case_ = feat_extract.model_input_names[0]
snake_case_ = BatchFeature({input_name: speech_inputs} )
snake_case_ = feat_extract.pad(_UpperCAmelCase , padding='''longest''' , return_tensors='''np''' )
self.assertIn('''attention_mask''' , _UpperCAmelCase )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , _UpperCAmelCase )
def UpperCamelCase__ ( self ):
snake_case_ = self.feat_extract_dict
snake_case_ = True
snake_case_ = self.feature_extraction_class(**_UpperCAmelCase )
snake_case_ = self.feat_extract_tester.prepare_inputs_for_common()
snake_case_ = [len(_UpperCAmelCase ) for x in speech_inputs]
snake_case_ = feat_extract.model_input_names[0]
snake_case_ = BatchFeature({input_name: speech_inputs} )
snake_case_ = min(_UpperCAmelCase )
snake_case_ = feat_extract.pad(
_UpperCAmelCase , padding='''max_length''' , max_length=_UpperCAmelCase , truncation=_UpperCAmelCase , return_tensors='''np''' )
self.assertIn('''attention_mask''' , _UpperCAmelCase )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
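# Supplementary sketch (plain numpy, independent of the test mixin above) of
# the `pad_to_multiple_of` arithmetic the padding tests verify: the padded
# length is the input length rounded up to the next multiple.
import numpy as np


def pad_to_multiple(x: np.ndarray, multiple: int, padding_value: float = 0.0) -> np.ndarray:
    target = ((len(x) + multiple - 1) // multiple) * multiple  # round up
    return np.pad(x, (0, target - len(x)), constant_values=padding_value)


assert pad_to_multiple(np.ones(23), 10).shape == (30,)
assert pad_to_multiple(np.ones(30), 10).shape == (30,)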
| 267
| 0
|
# Function names and parameter order below are reconstructed from the bodies;
# the originals were obfuscated. R is taken as ~0.0821 L.atm/(mol.K).
def molarity_to_normality(nfactor: float, moles: float, volume: float) -> float:
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    """Ideal gas law: pressure = nRT / V."""
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    """Ideal gas law: volume = nRT / P."""
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    """Ideal gas law: temperature = PV / (nR)."""
    return round(float((pressure * volume) / (0.0821 * moles)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
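    # Quick usage checks (supplementary; values assume the reconstructed
    # parameter order above and R ~ 0.0821):
    assert molarity_to_normality(2, 3.1, 0.31) == 20
    assert moles_to_pressure(0.82, 3, 300) == 90
    assert moles_to_volume(0.82, 3, 300) == 90
    assert pressure_and_volume_to_temperature(0.82, 1, 2) == 20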
| 296
|
import random


class Onepad:
    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        """Encrypt each character code with a fresh random key."""
        plain = [ord(i) for i in text]
        cipher = []
        key = []
        for i in plain:
            k = random.randint(1, 300)
            c = (i + k) * k
            cipher.append(c)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        """Invert encrypt using the stored key list."""
        plain = []
        for i in range(len(key)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)


if __name__ == "__main__":
    c, k = Onepad().encrypt("Hello")
    print(c, k)
    print(Onepad().decrypt(c, k))
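    # Why decrypt recovers the text (supplementary): encrypt computes
    # c = (p + k) * k = p*k + k**2, so (c - k**2) / k == p exactly; the int()
    # cast only drops the float artifact. Round-trip check:
    assert Onepad.decrypt(*Onepad.encrypt("one-time pad")) == "one-time pad"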
| 296
| 1
|
"""simple docstring"""
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
a : Union[str, Any] = logging.getLogger(__name__)
a : List[str] = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
a : int = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __UpperCamelCase :
lowerCamelCase : Optional[str] =field(
default=a__ , metadata={
"""help""": (
"""The model checkpoint for weights initialization. Leave None if you want to train a model from"""
""" scratch."""
)
} , )
lowerCamelCase : Optional[str] =field(
default=a__ , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(a__ )} , )
lowerCamelCase : Optional[str] =field(
default=a__ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
lowerCamelCase : Optional[str] =field(
default=a__ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
lowerCamelCase : Optional[str] =field(
default=a__ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class __UpperCamelCase :
lowerCamelCase : Optional[str] =field(
default=a__ , metadata={"""help""": """The input training data file (a text file)."""} )
lowerCamelCase : Optional[str] =field(
default=a__ , metadata={
"""help""": (
"""The input training data files (multiple files in glob format). """
"""Very often splitting large files to smaller files can prevent tokenizer going out of memory"""
)
} , )
lowerCamelCase : Optional[str] =field(
default=a__ , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
lowerCamelCase : Optional[str] =field(
default=a__ , metadata={"""help""": """An optional input train ref data file for whole word mask in Chinese."""} , )
lowerCamelCase : Optional[str] =field(
default=a__ , metadata={"""help""": """An optional input eval ref data file for whole word mask in Chinese."""} , )
lowerCamelCase : bool =field(
default=a__ , metadata={"""help""": """Whether distinct lines of text in the dataset are to be handled as distinct sequences."""} , )
lowerCamelCase : bool =field(
default=a__ , metadata={"""help""": """Train with masked-language modeling loss instead of language modeling."""} )
lowerCamelCase : bool =field(default=a__ , metadata={"""help""": """Whether ot not to use whole word mask."""} )
lowerCamelCase : float =field(
default=0.15 , metadata={"""help""": """Ratio of tokens to mask for masked language modeling loss"""} )
lowerCamelCase : float =field(
default=1 / 6 , metadata={
"""help""": (
"""Ratio of length of a span of masked tokens to surrounding context length for permutation language"""
""" modeling."""
)
} , )
lowerCamelCase : int =field(
default=5 , metadata={"""help""": """Maximum length of a span of masked tokens for permutation language modeling."""} )
lowerCamelCase : int =field(
default=-1 , metadata={
"""help""": (
"""Optional input sequence length after tokenization."""
"""The training dataset will be truncated in block of this size for training."""
"""Default to the model max input length for single sentence inputs (take into account special tokens)."""
)
} , )
lowerCamelCase : bool =field(
default=a__ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def _SCREAMING_SNAKE_CASE ( _lowercase : DataTrainingArguments , _lowercase : PreTrainedTokenizer , _lowercase : bool = False , _lowercase : Optional[str] = None , ) ->Optional[int]:
'''simple docstring'''
def _dataset(_lowercase : List[Any] , _lowercase : Any=None ):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
raise ValueError("You need to set world whole masking and mlm to True for Chinese Whole Word Mask" )
return LineByLineWithRefDataset(
tokenizer=_lowercase , file_path=_lowercase , block_size=args.block_size , ref_path=_lowercase , )
return LineByLineTextDataset(tokenizer=_lowercase , file_path=_lowercase , block_size=args.block_size )
else:
return TextDataset(
tokenizer=_lowercase , file_path=_lowercase , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=_lowercase , )
if evaluate:
return _dataset(args.eval_data_file , args.eval_ref_file )
elif args.train_data_files:
return ConcatDataset([_dataset(_lowercase ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file , args.train_ref_file )
def _SCREAMING_SNAKE_CASE ( ) ->Optional[int]:
'''simple docstring'''
a : Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
a, a, a : Tuple = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
"Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
"or remove the --do_eval argument." )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s" , _lowercase )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
a : List[str] = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
a : Optional[Any] = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
a : List[Any] = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch." )
if model_args.tokenizer_name:
a : Any = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
a : Dict = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"
" script, save it,and load it from here, using --tokenizer_name" )
if model_args.model_name_or_path:
a : Tuple = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=_lowercase , cache_dir=model_args.cache_dir , )
else:
logger.info("Training new model from scratch" )
a : Any = AutoModelWithLMHead.from_config(_lowercase )
model.resize_token_embeddings(len(_lowercase ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
"BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"
"--mlm flag (masked language modeling)." )
if data_args.block_size <= 0:
a : int = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
a : int = min(data_args.block_size , tokenizer.max_len )
# Get datasets
a : int = (
get_dataset(_lowercase , tokenizer=_lowercase , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
a : List[Any] = (
get_dataset(_lowercase , tokenizer=_lowercase , evaluate=_lowercase , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
a : Any = DataCollatorForPermutationLanguageModeling(
tokenizer=_lowercase , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
a : Any = DataCollatorForWholeWordMask(
tokenizer=_lowercase , mlm_probability=data_args.mlm_probability )
else:
a : Tuple = DataCollatorForLanguageModeling(
tokenizer=_lowercase , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
a : Optional[Any] = Trainer(
model=_lowercase , args=_lowercase , data_collator=_lowercase , train_dataset=_lowercase , eval_dataset=_lowercase , prediction_loss_only=_lowercase , )
# Training
if training_args.do_train:
a : Dict = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=_lowercase )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
a : List[str] = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
a : int = trainer.evaluate()
a : List[Any] = math.exp(eval_output["eval_loss"] )
a : Tuple = {"perplexity": perplexity}
a : int = os.path.join(training_args.output_dir , "eval_results_lm.txt" )
if trainer.is_world_master():
with open(_lowercase , "w" ) as writer:
logger.info("***** Eval results *****" )
for key in sorted(result.keys() ):
logger.info(" %s = %s" , _lowercase , str(result[key] ) )
writer.write("%s = %s\n" % (key, str(result[key] )) )
results.update(_lowercase )
return results
def _SCREAMING_SNAKE_CASE ( _lowercase : int ) ->List[Any]:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 79
|
"""simple docstring"""
a : Optional[int] = 8.31_4462 # Unit - J mol-1 K-1
def _SCREAMING_SNAKE_CASE ( _lowercase : float , _lowercase : float , _lowercase : float ) ->float:
'''simple docstring'''
if moles < 0 or kelvin < 0 or volume < 0:
raise ValueError("Invalid inputs. Enter positive value." )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def _SCREAMING_SNAKE_CASE ( _lowercase : float , _lowercase : float , _lowercase : float ) ->float:
'''simple docstring'''
if moles < 0 or kelvin < 0 or pressure < 0:
raise ValueError("Invalid inputs. Enter positive value." )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
| 79
| 1
|
# "greatest_common_divisor" and "lcm" are named in the original bodies;
# "solution" is a reconstructed name for the obfuscated driver function.
def greatest_common_divisor(x: int, y: int) -> int:
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g


if __name__ == "__main__":
    print(f"{solution() = }")
| 193
|
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__lowercase = logging.get_logger(__name__)
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
a__ : Optional[Any] = ["""pixel_values"""]
def __init__( self , __lowercase = True , __lowercase = 32 , __lowercase=PILImageResampling.BILINEAR , __lowercase = True , **__lowercase , ) -> None:
__UpperCamelCase :Optional[int] = do_resize
__UpperCamelCase :Any = do_rescale
__UpperCamelCase :str = size_divisor
__UpperCamelCase :Dict = resample
super().__init__(**__lowercase)
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase = None , **__lowercase) -> np.ndarray:
__UpperCamelCase , __UpperCamelCase :int = get_image_size(__lowercase)
# Rounds the height and width down to the closest multiple of size_divisor
__UpperCamelCase :List[Any] = height // size_divisor * size_divisor
__UpperCamelCase :List[str] = width // size_divisor * size_divisor
__UpperCamelCase :str = resize(__lowercase , (new_h, new_w) , resample=__lowercase , data_format=__lowercase , **__lowercase)
return image
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase = None , **__lowercase) -> np.ndarray:
return rescale(image=__lowercase , scale=__lowercase , data_format=__lowercase , **__lowercase)
def UpperCamelCase__ ( self , __lowercase , __lowercase = None , __lowercase = None , __lowercase=None , __lowercase = None , __lowercase = None , __lowercase = ChannelDimension.FIRST , **__lowercase , ) -> BatchFeature:
__UpperCamelCase :Union[str, Any] = do_resize if do_resize is not None else self.do_resize
__UpperCamelCase :Tuple = do_rescale if do_rescale is not None else self.do_rescale
__UpperCamelCase :List[str] = size_divisor if size_divisor is not None else self.size_divisor
__UpperCamelCase :List[Any] = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('''size_divisor is required for resizing''')
__UpperCamelCase :List[Any] = make_list_of_images(__lowercase)
if not valid_images(__lowercase):
raise ValueError('''Invalid image(s)''')
# All transformations expect numpy arrays.
__UpperCamelCase :Optional[Any] = [to_numpy_array(__lowercase) for img in images]
if do_resize:
__UpperCamelCase :List[str] = [self.resize(__lowercase , size_divisor=__lowercase , resample=__lowercase) for image in images]
if do_rescale:
__UpperCamelCase :Dict = [self.rescale(__lowercase , scale=1 / 255) for image in images]
__UpperCamelCase :str = [to_channel_dimension_format(__lowercase , __lowercase) for image in images]
__UpperCamelCase :int = {'''pixel_values''': images}
return BatchFeature(data=__lowercase , tensor_type=__lowercase)
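# Supplementary sketch (plain Python, outside the image-processor class above)
# of the size-divisor rounding used in `resize`: height and width are rounded
# *down* to the nearest multiple of size_divisor, so the output always tiles
# evenly into patches.
def round_down_to_multiple(value: int, divisor: int) -> int:
    return value // divisor * divisor


assert round_down_to_multiple(257, 32) == 256
assert round_down_to_multiple(256, 32) == 256
assert round_down_to_multiple(31, 32) == 0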
| 43
| 0
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowerCAmelCase : Any = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : List[str] = [
"MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"MraForMaskedLM",
"MraForMultipleChoice",
"MraForQuestionAnswering",
"MraForSequenceClassification",
"MraForTokenClassification",
"MraLayer",
"MraModel",
"MraPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
_lowerCAmelCase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 308
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : Any = logging.get_logger(__name__)
_lowerCAmelCase : int = {
"caidas/swin2sr-classicalsr-x2-64": (
"https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
),
}
class __magic_name__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE = 'swin2sr'
SCREAMING_SNAKE_CASE = {
'hidden_size': 'embed_dim',
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self , __snake_case=64 , __snake_case=1 , __snake_case=3 , __snake_case=180 , __snake_case=[6, 6, 6, 6, 6, 6] , __snake_case=[6, 6, 6, 6, 6, 6] , __snake_case=8 , __snake_case=2.0 , __snake_case=True , __snake_case=0.0 , __snake_case=0.0 , __snake_case=0.1 , __snake_case="gelu" , __snake_case=False , __snake_case=0.02 , __snake_case=1e-5 , __snake_case=2 , __snake_case=1.0 , __snake_case="1conv" , __snake_case="pixelshuffle" , **__snake_case , ) -> Dict:
'''simple docstring'''
super().__init__(**__snake_case )
__a =image_size
__a =patch_size
__a =num_channels
__a =embed_dim
__a =depths
__a =len(__snake_case )
__a =num_heads
__a =window_size
__a =mlp_ratio
__a =qkv_bias
__a =hidden_dropout_prob
__a =attention_probs_dropout_prob
__a =drop_path_rate
__a =hidden_act
__a =use_absolute_embeddings
__a =layer_norm_eps
__a =initializer_range
__a =upscale
__a =img_range
__a =resi_connection
__a =upsampler
| 308
| 1
|
"""simple docstring"""
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = [0] * len(lowerCamelCase )
for i in range(1 , len(lowerCamelCase ) ):
# use last results for better performance - dynamic programming
UpperCAmelCase__ = prefix_result[i - 1]
while j > 0 and input_string[i] != input_string[j]:
UpperCAmelCase__ = prefix_result[j - 1]
if input_string[i] == input_string[j]:
j += 1
UpperCAmelCase__ = j
return prefix_result
def a_ ( lowerCamelCase ):
return max(prefix_function(lowerCamelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
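    # Worked example: for "aabaaab", position i stores the length of the
    # longest proper prefix of the first i+1 characters that is also a suffix.
    assert prefix_function("aabaaab") == [0, 1, 0, 1, 2, 2, 3]
    assert longest_prefix("aabaaab") == 3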
| 98
|
import functools


# "minimum_cost" is a reconstructed name for the obfuscated function; the
# parameter names follow from the validation messages in the body.
def minimum_cost(days: list[int], costs: list[int]) -> int:
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")
    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")
    if len(days) == 0:
        return 0
    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")
    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")
    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1)
        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
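    # Worked example (the classic minimum-cost-for-tickets problem): with
    # travel days [1, 4, 6, 7, 8, 20] and pass costs [2, 7, 15] for 1-, 7- and
    # 30-day passes, the optimum is a 1-day pass on day 1, a 7-day pass bought
    # on day 4 (covering days 4-10), and a 1-day pass on day 20: 2 + 7 + 2 = 11.
    assert minimum_cost([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11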
| 299
| 0
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
__A = logging.get_logger(__name__)
__A = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# See all MVP models at https://huggingface.co/models?filter=mvp
__A = {
'''vocab_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json''',
},
'''added_tokens.json''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json''',
},
'''merges_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json''',
},
}
__A = {
'''RUCAIBox/mvp''': 1024,
}
class _snake_case ( a__ ):
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = PRETRAINED_VOCAB_FILES_MAP
snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ = ["input_ids", "attention_mask"]
snake_case__ = MvpTokenizer
def __init__( self : Union[str, Any] , UpperCAmelCase : List[Any]=None , UpperCAmelCase : Tuple=None , UpperCAmelCase : Tuple=None , UpperCAmelCase : List[Any]="replace" , UpperCAmelCase : Dict="<s>" , UpperCAmelCase : Optional[Any]="</s>" , UpperCAmelCase : Dict="</s>" , UpperCAmelCase : int="<s>" , UpperCAmelCase : Union[str, Any]="<unk>" , UpperCAmelCase : str="<pad>" , UpperCAmelCase : List[Any]="<mask>" , UpperCAmelCase : Union[str, Any]=False , UpperCAmelCase : List[str]=True , **UpperCAmelCase : List[Any] , ):
super().__init__(
UpperCAmelCase , UpperCAmelCase , tokenizer_file=UpperCAmelCase , errors=UpperCAmelCase , bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , sep_token=UpperCAmelCase , cls_token=UpperCAmelCase , unk_token=UpperCAmelCase , pad_token=UpperCAmelCase , mask_token=UpperCAmelCase , add_prefix_space=UpperCAmelCase , trim_offsets=UpperCAmelCase , **UpperCAmelCase , )
__lowerCamelCase : str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , UpperCAmelCase ) != add_prefix_space:
__lowerCamelCase : List[str] = getattr(UpperCAmelCase , pre_tok_state.pop("type" ) )
__lowerCamelCase : Dict = add_prefix_space
__lowerCamelCase : str = pre_tok_class(**UpperCAmelCase )
__lowerCamelCase : Union[str, Any] = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
__lowerCamelCase : int = "post_processor"
__lowerCamelCase : List[str] = getattr(self.backend_tokenizer , UpperCAmelCase , UpperCAmelCase )
if tokenizer_component_instance:
__lowerCamelCase : Union[str, Any] = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
__lowerCamelCase : List[str] = tuple(state["sep"] )
if "cls" in state:
__lowerCamelCase : Dict = tuple(state["cls"] )
__lowerCamelCase : str = False
if state.get("add_prefix_space" , UpperCAmelCase ) != add_prefix_space:
__lowerCamelCase : List[str] = add_prefix_space
__lowerCamelCase : Optional[Any] = True
if state.get("trim_offsets" , UpperCAmelCase ) != trim_offsets:
__lowerCamelCase : Dict = trim_offsets
__lowerCamelCase : Tuple = True
if changes_to_apply:
__lowerCamelCase : List[Any] = getattr(UpperCAmelCase , state.pop("type" ) )
__lowerCamelCase : Union[str, Any] = component_class(**UpperCAmelCase )
setattr(self.backend_tokenizer , UpperCAmelCase , UpperCAmelCase )
@property
def lowerCamelCase__ ( self : Tuple ):
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCamelCase__ ( self : Any , UpperCAmelCase : Tuple ):
__lowerCamelCase : Dict = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else value
__lowerCamelCase : List[str] = value
def lowerCamelCase__ ( self : List[Any] , *UpperCAmelCase : Any , **UpperCAmelCase : Dict ):
__lowerCamelCase : List[str] = kwargs.get("is_split_into_words" , UpperCAmelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*UpperCAmelCase , **UpperCAmelCase )
def lowerCamelCase__ ( self : Any , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : str ):
__lowerCamelCase : Optional[Any] = kwargs.get("is_split_into_words" , UpperCAmelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs." )
return super()._encode_plus(*UpperCAmelCase , **UpperCAmelCase )
def lowerCamelCase__ ( self : Optional[int] , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ):
__lowerCamelCase : Tuple = self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase )
return tuple(UpperCAmelCase )
def lowerCamelCase__ ( self : List[str] , UpperCAmelCase : Any , UpperCAmelCase : List[Any]=None ):
__lowerCamelCase : Dict = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCamelCase__ ( self : List[Any] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ):
__lowerCamelCase : Union[str, Any] = [self.sep_token_id]
__lowerCamelCase : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 367
|
"""simple docstring"""
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
__A = logging.get_logger(__name__)
@add_end_docstrings(a__ )
class _snake_case ( a__ ):
def __init__( self : List[Any] , *UpperCAmelCase : str , **UpperCAmelCase : Union[str, Any] ):
super().__init__(*UpperCAmelCase , **UpperCAmelCase )
requires_backends(self , "decord" )
self.check_model_type(UpperCAmelCase )
def lowerCamelCase__ ( self : Dict , UpperCAmelCase : Tuple=None , UpperCAmelCase : List[Any]=None , UpperCAmelCase : Any=None ):
__lowerCamelCase : int = {}
if frame_sampling_rate is not None:
__lowerCamelCase : int = frame_sampling_rate
if num_frames is not None:
__lowerCamelCase : List[str] = num_frames
__lowerCamelCase : Optional[Any] = {}
if top_k is not None:
__lowerCamelCase : Optional[Any] = top_k
return preprocess_params, {}, postprocess_params
def __call__( self : int , UpperCAmelCase : Union[str, List[str]] , **UpperCAmelCase : Tuple ):
return super().__call__(UpperCAmelCase , **UpperCAmelCase )
def lowerCamelCase__ ( self : int , UpperCAmelCase : str , UpperCAmelCase : List[Any]=None , UpperCAmelCase : List[str]=1 ):
if num_frames is None:
__lowerCamelCase : Optional[int] = self.model.config.num_frames
if video.startswith("http://" ) or video.startswith("https://" ):
__lowerCamelCase : Optional[int] = BytesIO(requests.get(UpperCAmelCase ).content )
__lowerCamelCase : Optional[Any] = VideoReader(UpperCAmelCase )
videoreader.seek(0 )
__lowerCamelCase : str = 0
__lowerCamelCase : int = num_frames * frame_sampling_rate - 1
__lowerCamelCase : Any = np.linspace(UpperCAmelCase , UpperCAmelCase , num=UpperCAmelCase , dtype=np.intaa )
__lowerCamelCase : List[str] = videoreader.get_batch(UpperCAmelCase ).asnumpy()
__lowerCamelCase : Any = list(UpperCAmelCase )
__lowerCamelCase : Any = self.image_processor(UpperCAmelCase , return_tensors=self.framework )
return model_inputs
def lowerCamelCase__ ( self : Optional[Any] , UpperCAmelCase : List[Any] ):
__lowerCamelCase : Union[str, Any] = self.model(**UpperCAmelCase )
return model_outputs
def lowerCamelCase__ ( self : Dict , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any]=5 ):
if top_k > self.model.config.num_labels:
__lowerCamelCase : str = self.model.config.num_labels
if self.framework == "pt":
__lowerCamelCase : Any = model_outputs.logits.softmax(-1 )[0]
__lowerCamelCase , __lowerCamelCase : int = probs.topk(UpperCAmelCase )
else:
raise ValueError(F"""Unsupported framework: {self.framework}""" )
__lowerCamelCase : Union[str, Any] = scores.tolist()
__lowerCamelCase : List[Any] = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(UpperCAmelCase , UpperCAmelCase )]
| 64
| 0
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ , unittest.TestCase ):
__lowerCAmelCase : int = KandinskyVaaControlnetImgaImgPipeline
__lowerCAmelCase : Dict = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
__lowerCAmelCase : Any = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
__lowerCAmelCase : Optional[int] = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
__lowerCAmelCase : List[Any] = False
@property
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
return 32
@property
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
return 32
@property
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
return self.time_input_dim
@property
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
return self.time_input_dim * 4
@property
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
return 100
@property
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase : List[str] = {
"""in_channels""": 8,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
UpperCAmelCase : int = UNetaDConditionModel(**_SCREAMING_SNAKE_CASE )
return model
@property
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase : Dict = VQModel(**self.dummy_movq_kwargs )
return model
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : List[Any] = self.dummy_unet
UpperCAmelCase : Dict = self.dummy_movq
UpperCAmelCase : List[str] = {
"""num_train_timesteps""": 1000,
"""beta_schedule""": """linear""",
"""beta_start""": 0.0_0085,
"""beta_end""": 0.012,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
UpperCAmelCase : Optional[Any] = DDIMScheduler(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[Any] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 ) -> Any:
'''simple docstring'''
UpperCAmelCase : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_SCREAMING_SNAKE_CASE ) ).to(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_SCREAMING_SNAKE_CASE )
# create init_image
UpperCAmelCase : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(_SCREAMING_SNAKE_CASE ) ).to(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[str] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase : Optional[Any] = Image.fromarray(np.uinta(_SCREAMING_SNAKE_CASE ) ).convert("""RGB""" ).resize((256, 256) )
# create hint
UpperCAmelCase : str = floats_tensor((1, 3, 64, 64) , rng=random.Random(_SCREAMING_SNAKE_CASE ) ).to(_SCREAMING_SNAKE_CASE )
if str(_SCREAMING_SNAKE_CASE ).startswith("""mps""" ):
UpperCAmelCase : List[Any] = torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
UpperCAmelCase : Tuple = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : str = {
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""hint""": hint,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase : List[str] = """cpu"""
UpperCAmelCase : Optional[Any] = self.get_dummy_components()
UpperCAmelCase : Optional[Any] = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[Any] = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[int] = pipe(**self.get_dummy_inputs(_SCREAMING_SNAKE_CASE ) )
UpperCAmelCase : int = output.images
UpperCAmelCase : Optional[Any] = pipe(
**self.get_dummy_inputs(_SCREAMING_SNAKE_CASE ) , return_dict=_SCREAMING_SNAKE_CASE , )[0]
UpperCAmelCase : Tuple = image[0, -3:, -3:, -1]
UpperCAmelCase : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase : Any = np.array(
[0.5498_5034, 0.5550_9365, 0.5256_1504, 0.557_0494, 0.559_3818, 0.526_3979, 0.5028_5643, 0.506_9846, 0.5119_6736] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_kandinsky_controlnet_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy"
        )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        init_image = init_image.resize((512, 512))
        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)
        prompt = "A robot, 4k photo"
        pipe_prior = KandinskyV22PriorEmb2EmbPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)
        pipeline = KandinskyV22ControlnetImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, image=init_image, strength=0.85, generator=generator, negative_prompt=""
        ).to_tuple()
        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            height=512,
            width=512,
            strength=0.5,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(image, expected_image)
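
    # Note: this integration test pulls multi-GB fp16 checkpoints from the Hub and
    # needs a CUDA device, hence the @slow and @require_torch_gpu gating above.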
| 109
|
'''simple docstring'''
import numpy as np
def power_iteration(input_matrix, vector, error_tol=1e-12, max_iterations=100):
    """
    Power iteration: repeatedly apply the matrix to a vector and renormalize;
    the vector converges to the eigenvector of the dominant eigenvalue.
    Returns the dominant eigenvalue and its eigenvector.
    """
    # Ensure the matrix is square.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)
    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12
    while not convergence:
        # Multiply matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find the Rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))
        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_
    if is_complex:
        lambda_ = np.real(lambda_)
    return lambda_, vector
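

# Illustrative sanity check (not part of the original file): for the symmetric matrix
# [[2, 1], [1, 2]], whose eigenvalues are {3, 1}, power_iteration(np.array([[2, 1], [1, 2]]),
# np.array([1.0, 0.0])) converges to lambda_ ~= 3.0 and vector ~= [0.707, 0.707].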


def test_power_iteration():
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)
    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector
        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)
        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermitian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]
        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector,
        # as they are only unique up to sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    test_power_iteration()
| 267
| 0
|
"""simple docstring"""
from __future__ import annotations
import queue
class TreeNode:
    def __init__(self, data):
        self.data = data
        self.right = None
        self.left = None


def build_tree() -> TreeNode:
    print("\n********Press N to stop entering at any point of time********\n")
    check = input("Enter the value of the root node: ").strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise  # unreachable: the loop always returns once the user enters "n"


def pre_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=",")
    pre_order(node.left)
    pre_order(node.right)


def in_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=",")
    in_order(node.right)


def post_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=",")


def level_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=",")
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)


def level_order_actual(node: TreeNode) -> None:
    # prints the tree level by level, one line per level
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=",")
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)


def pre_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=",")
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right


def in_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=",")
        n = n.right


def post_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data, end=",")


def prompt(s: str = "", width: int = 50, char: str = "*") -> str:
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s) - 2, 2)
    return f"{left * char} {s} {(left + extra) * char}"
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt('Binary Tree Traversals'))
__UpperCAmelCase = build_tree()
print(prompt('Pre Order Traversal'))
pre_order(node)
print(prompt() + '\n')
print(prompt('In Order Traversal'))
in_order(node)
print(prompt() + '\n')
print(prompt('Post Order Traversal'))
post_order(node)
print(prompt() + '\n')
print(prompt('Level Order Traversal'))
level_order(node)
print(prompt() + '\n')
print(prompt('Actual Level Order Traversal'))
level_order_actual(node)
print('*' * 50 + '\n')
print(prompt('Pre Order Traversal - Iteration Version'))
pre_order_iter(node)
print(prompt() + '\n')
print(prompt('In Order Traversal - Iteration Version'))
in_order_iter(node)
print(prompt() + '\n')
print(prompt('Post Order Traversal - Iteration Version'))
post_order_iter(node)
print(prompt())
| 1
|
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_save_load_pretrained_additional_features(self):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)
        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        raw_speech = floats_list((3, 1000))
        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        self.assertListEqual(
            processor.model_input_names[2:],
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )
| 1
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

BIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}


class BitConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]
    def __init__(
        self,
        num_channels=3, embedding_size=64, hidden_sizes=[256, 512, 1024, 2048], depths=[3, 4, 6, 3],
        layer_type="preactivation", hidden_act="relu", global_padding=None, num_groups=32,
        drop_path_rate=0.0, embedding_dynamic_padding=False, output_stride=32, width_factor=1,
        out_features=None, out_indices=None, **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
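

# Minimal usage sketch (illustrative, not part of the original file):
# config = BitConfig(layer_type="bottleneck", out_features=["stage4"])
# the requested out_features are validated/aligned against the stage_names above.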
| 79
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}


class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"
    def __init__(
        self,
        vocab_size=50432, hidden_size=6144, num_hidden_layers=44, num_attention_heads=64,
        intermediate_size=24576, hidden_act="gelu", rotary_pct=0.25, rotary_emb_base=10000,
        attention_dropout=0.0, hidden_dropout=0.0, classifier_dropout=0.1, max_position_embeddings=2048,
        initializer_range=0.02, layer_norm_eps=1e-5, use_cache=True, bos_token_id=0, eos_token_id=2,
        tie_word_embeddings=False, use_parallel_residual=True, rope_scaling=None, **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
            )
    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
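

# Illustrative example (not part of the original file): a value accepted by the
# validation above is rope_scaling={"type": "linear", "factor": 2.0}; a factor <= 1.0
# or an unknown type raises the ValueErrors shown.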
| 79
| 1
|
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)

MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json",
    # See all Marian models at https://huggingface.co/models?filter=marian
}


class MarianConfig(PretrainedConfig):
    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(
        self,
        vocab_size=58101, decoder_vocab_size=None, max_position_embeddings=1024,
        encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16,
        decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True,
        activation_function="gelu", d_model=1024, dropout=0.1, attention_dropout=0.0,
        activation_dropout=0.0, init_std=0.02, decoder_start_token_id=58100, scale_embedding=False,
        pad_token_id=58100, eos_token_id=0, forced_eos_token_id=0,
        share_encoder_decoder_embeddings=True, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs,
        )


class MarianOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )
        return common_inputs
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1,
        is_pair: bool = False, framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )
            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(
        self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1,
        is_pair: bool = False, framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_encoder_and_decoder(
        self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1,
        is_pair: bool = False, framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs(
        self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1,
        is_pair: bool = False, framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
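

# Sketch of typical consumption (illustrative, not part of the original file; assumes
# the transformers.onnx export helpers):
# onnx_config = MarianOnnxConfig(MarianConfig(), task="seq2seq-lm")
# dummy = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)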
| 351
|
def print_max_activities(start: list[int], finish: list[int]) -> None:
    """Greedy activity selection; assumes activities are sorted by finish time."""
    n = len(finish)
    print("The following activities are selected:")
    # The first activity is always selected
    i = 0
    print(i, end=",")
    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
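
# Expected output for the sample data above (derived by tracing the greedy loop):
# "The following activities are selected:" followed by "0,1,3,4," — activity 2
# overlaps activity 1, and activity 5 overlaps activity 4.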
| 152
| 0
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mra"] = [
        "MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MraForMaskedLM",
        "MraForMultipleChoice",
        "MraForQuestionAnswering",
        "MraForSequenceClassification",
        "MraForTokenClassification",
        "MraLayer",
        "MraModel",
        "MraPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mra import (
            MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
            MraLayer,
            MraModel,
            MraPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
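
# Note (illustrative): replacing the module in sys.modules with a _LazyModule means the
# torch-backed classes listed in _import_structure are only imported on first attribute
# access, keeping the top-level package import cheap.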
| 308
|
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
logger = get_logger(__name__)


class MockDownloadManager:
    dummy_file_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_local = False

    def __init__(
        self,
        dataset_name: str,
        config: str,
        version: Union[Version, str],
        cache_dir: Optional[str] = None,
        use_local_dummy_data: bool = False,
        load_existing_dummy_data: bool = True,
        download_callbacks: Optional[List[Callable]] = None,
    ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None
    @property
    def dummy_file(self):
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)

    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")

    def download_dummy_data(self):
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
        )
        return os.path.join(local_path, self.dummy_file_name)

    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url

    @property
    def manual_dir(self):
        # return full path if its a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])
    def download_and_extract(self, data_url, *args):
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name
        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        return path

    def get_recorded_sizes_checksums(self):
        return {}
    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value
        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict
    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
        )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list
    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass
    def iter_archive(self, path):
        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")

    def iter_files(self, paths):
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
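

# Usage sketch (illustrative, not part of the original file): dataset tests substitute
# this manager for the real download manager, so dl_manager.download_and_extract(url)
# resolves to paths inside a local dummy_data.zip instead of hitting the network.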
| 308
| 1
|
'''simple docstring'''
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class __snake_case :
"""simple docstring"""
pass
| 367
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
__A : Any = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
__A : int = {
"vocab_file": {
"unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt",
},
"tokenizer_file": {
"unc-nlp/lxmert-base-uncased": (
"https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"
),
},
}
__A : List[Any] = {
"unc-nlp/lxmert-base-uncased": 512,
}
__A : Optional[Any] = {
"unc-nlp/lxmert-base-uncased": {"do_lower_case": True},
}


class LxmertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]",
                 sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]",
                 tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token,
            sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
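

# Usage sketch (illustrative, not part of the original file):
# tok = LxmertTokenizerFast.from_pretrained("unc-nlp/lxmert-base-uncased")
# tok("a photo of a cat")  # input_ids gain [CLS]/[SEP] via build_inputs_with_special_tokens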
| 89
| 0
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


class SimpleImageProcessor(BaseImageProcessor):
    # NOTE: the model-specific class name was lost in obfuscation; a generic
    # stand-in name is used here.
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PIL.Image.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, rescale_factor: Union[int, float] = 1 / 255, do_rescale: bool = True, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PIL.Image.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return resize(image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample=None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
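

# Usage sketch (illustrative, not part of the original file; the class name above is
# a stand-in for the obfuscated original):
# processor = SimpleImageProcessor()
# batch = processor(images=pil_image, return_tensors="np")  # __call__ dispatches to preprocess
# batch["pixel_values"][0].shape  # (3, 224, 224) with the 256-resize / 224-crop defaults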
| 59
|
"""simple docstring"""
def heaps(arr: list) -> list:
    """Return all permutations of `arr` using Heap's algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]
    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return
        generate(k - 1, arr)
        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
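
# Note (illustrative): heaps() returns n! tuples for n distinct inputs; e.g.
# heaps([1, 2, 3]) yields all six permutations of (1, 2, 3).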
| 64
| 0
|
"""simple docstring"""
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
logger = logging.get_logger(__name__)


class ZeroShotClassificationArgumentHandler(ArgumentHandler):
    """
    Handles arguments for zero-shot text classification by turning each candidate
    label into an NLI premise/hypothesis pair.
    """

    def _parse_labels(self, labels):
        if isinstance(labels, str):
            labels = [label.strip() for label in labels.split(",") if label.strip()]
        return labels

    def __call__(self, sequences, labels, hypothesis_template):
        if len(labels) == 0 or len(hypothesis_template) == 0:
            raise ValueError("You must include at least one label and at least one sequence.")
        if hypothesis_template.format(labels[0]) == hypothesis_template:
            raise ValueError(
                (
                    'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
                    "Make sure the passed template includes formatting syntax such as {{}} where the label should go."
                ).format(hypothesis_template)
            )
        if isinstance(sequences, str):
            sequences = [sequences]
        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])
        return sequence_pairs, sequences


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotClassificationPipeline(ChunkPipeline):
    def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
        self._args_parser = args_parser
        super().__init__(*args, **kwargs)
        if self.entailment_id == -1:
            logger.warning(
                "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
                "-1. Define a descriptive label2id mapping in the model config to ensure correct outputs."
            )

    @property
    def entailment_id(self):
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith("entail"):
                return ind
        return -1

    def _parse_and_tokenize(
        self, sequence_pairs, padding=True, add_special_tokens=True, truncation=TruncationStrategy.ONLY_FIRST, **kwargs
    ):
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                "Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
                " `pad_token=eos_token`"
            )
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs,
                add_special_tokens=add_special_tokens,
                return_tensors=return_tensors,
                padding=padding,
                truncation=truncation,
            )
        except Exception as e:
            if "too short" in str(e):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.
                inputs = self.tokenizer(
                    sequence_pairs,
                    add_special_tokens=add_special_tokens,
                    return_tensors=return_tensors,
                    padding=padding,
                    truncation=TruncationStrategy.DO_NOT_TRUNCATE,
                )
            else:
                raise e
        return inputs

    def _sanitize_parameters(self, **kwargs):
        if kwargs.get("multi_class", None) is not None:
            kwargs["multi_label"] = kwargs["multi_class"]
            logger.warning(
                "The `multi_class` argument has been deprecated and renamed to `multi_label`. "
                "`multi_class` will be removed in a future version of Transformers."
            )
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = self._args_parser._parse_labels(kwargs["candidate_labels"])
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params["multi_label"] = kwargs["multi_label"]
        return preprocess_params, {}, postprocess_params
    def __call__(self, sequences, *args, **kwargs):
        if len(args) == 0:
            pass
        elif len(args) == 1 and "candidate_labels" not in kwargs:
            kwargs["candidate_labels"] = args[0]
        else:
            raise ValueError(f"Unable to understand extra arguments {args}")
        return super().__call__(sequences, **kwargs)

    def preprocess(self, inputs, candidate_labels=None, hypothesis_template="This example is {}."):
        sequence_pairs, sequences = self._args_parser(inputs, candidate_labels, hypothesis_template)
        for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels, sequence_pairs)):
            model_input = self._parse_and_tokenize([sequence_pair])
            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(candidate_labels) - 1,
                **model_input,
            }
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :List[Any] = inputs['candidate_label']
__SCREAMING_SNAKE_CASE :Dict = inputs['sequence']
__SCREAMING_SNAKE_CASE :int = {k: inputs[k] for k in self.tokenizer.model_input_names}
__SCREAMING_SNAKE_CASE :Optional[Any] = self.model(**SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Any = {
'candidate_label': candidate_label,
'sequence': sequence,
'is_last': inputs['is_last'],
**outputs,
}
return model_outputs
    def postprocess( self ,model_outputs ,multi_label=False ) -> List[Any]:
        """simple docstring"""
        candidate_labels = [outputs['''candidate_label'''] for outputs in model_outputs]
        sequences = [outputs['''sequence'''] for outputs in model_outputs]
        logits = np.concatenate([output['''logits'''].numpy() for output in model_outputs] )
        N = logits.shape[0]
        n = len(candidate_labels )
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1) )
        if multi_label or len(candidate_labels ) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits ) / np.exp(entail_contr_logits ).sum(-1 ,keepdims=True )
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits ) / np.exp(entail_logits ).sum(-1 ,keepdims=True )
        top_inds = list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
}
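A minimal NumPy sketch of the two scoring branches above, using toy logits; the entailment/contradiction indices are assumed here (the real pipeline reads them from the model config):

import numpy as np

reshaped_outputs = np.random.randn(1, 3, 3)  # 1 sequence, 3 candidate labels, 3 NLI classes
entailment_id, contradiction_id = 2, 0       # assumed order: (contradiction, neutral, entailment)

# multi_label: each label is scored independently as entailment vs. contradiction
pair = reshaped_outputs[..., [contradiction_id, entailment_id]]
multi_scores = np.exp(pair) / np.exp(pair).sum(-1, keepdims=True)
print(multi_scores[..., 1])    # per-label probabilities; rows need not sum to 1

# single-label: entailment logits compete across all candidate labels
entail = reshaped_outputs[..., entailment_id]
single_scores = np.exp(entail) / np.exp(entail).sum(-1, keepdims=True)
print(single_scores.sum(-1))   # sums to 1.0 across the label dimension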
| 351
|
"""simple docstring"""
import base64
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class _SCREAMING_SNAKE_CASE:
    def __init__( self ,config_file_or_dict ) -> int:
        """simple docstring"""
        if isinstance(config_file_or_dict ,dict ):
            # Don't modify user's data should they want to reuse it (e.g. in tests), because once we
            # modified it, it will not be accepted here again, since `auto` values would have been overridden
            config = deepcopy(config_file_or_dict )
        elif os.path.exists(config_file_or_dict ):
            with io.open(config_file_or_dict ,'''r''' ,encoding='''utf-8''' ) as f:
                config = json.load(f )
        else:
            try:
                config_decoded = base64.urlsafe_b64decode(config_file_or_dict ).decode('''utf-8''' )
                config = json.loads(config_decoded )
            except (UnicodeDecodeError, AttributeError, ValueError):
                raise ValueError(
                    f'''Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}''' )
        self.config = config
        self.set_stage_and_offload()
    def set_stage_and_offload( self ) -> Any:
        """simple docstring"""
        self._stage = self.get_value('''zero_optimization.stage''' ,-1 )
        # offload
        self._offload = False
        if self.is_zero2() or self.is_zero3():
            offload_devices_valid = set(['''cpu''', '''nvme'''] )
            offload_devices = set(
                [
                    self.get_value('''zero_optimization.offload_optimizer.device''' ),
                    self.get_value('''zero_optimization.offload_param.device''' ),
                ] )
            if len(offload_devices & offload_devices_valid ) > 0:
                self._offload = True
    def find_config_node( self ,ds_key_long ) -> Tuple:
        """simple docstring"""
        config = self.config
        # find the config node of interest if it exists
        nodes = ds_key_long.split('''.''' )
        ds_key = nodes.pop()
        for node in nodes:
            config = config.get(node )
            if config is None:
                return None, ds_key
        return config, ds_key
    def get_value( self ,ds_key_long ,default=None ) -> Tuple:
        """simple docstring"""
        config , ds_key = self.find_config_node(ds_key_long )
        if config is None:
            return default
        return config.get(ds_key ,default )
    def del_config_sub_tree( self ,ds_key_long ,must_exist=False ) -> int:
        """simple docstring"""
        config = self.config
        # find the config node of interest if it exists
        nodes = ds_key_long.split('''.''' )
        for node in nodes:
            parent_config = config
            config = config.get(node )
            if config is None:
                if must_exist:
                    raise ValueError(f'''Can\'t find {ds_key_long} entry in the config: {self.config}''' )
                else:
                    return
        # if found remove it
        if parent_config is not None:
            parent_config.pop(node )
    def is_true( self ,ds_key_long ) -> Optional[int]:
        """simple docstring"""
        value = self.get_value(ds_key_long )
        return False if value is None else bool(value )
    def is_false( self ,ds_key_long ) -> Optional[Any]:
        """simple docstring"""
        value = self.get_value(ds_key_long )
        return False if value is None else not bool(value )
    def is_zero2( self ) -> List[str]:
        """simple docstring"""
        return self._stage == 2
    def is_zero3( self ) -> List[Any]:
        """simple docstring"""
        return self._stage == 3
    def is_offload( self ) -> Union[str, Any]:
        """simple docstring"""
        return self._offload
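The dotted-key helpers above are easy to exercise in isolation; a standalone sketch with a hypothetical config dict:

config = {"zero_optimization": {"stage": 3, "offload_param": {"device": "cpu"}}}

def get_value(config: dict, ds_key_long: str, default=None):
    # walk all but the last dotted segment, then read the leaf
    *nodes, ds_key = ds_key_long.split(".")
    for node in nodes:
        config = config.get(node)
        if config is None:
            return default
    return config.get(ds_key, default)

assert get_value(config, "zero_optimization.stage") == 3
assert get_value(config, "zero_optimization.offload_param.device") == "cpu"
assert get_value(config, "zero_optimization.missing.device", "none") == "none"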
class _SCREAMING_SNAKE_CASE:
    def __init__( self ,engine ) -> Optional[Any]:
        """simple docstring"""
        self.engine = engine
    def backward( self ,loss ,**kwargs ) -> List[Any]:
        """simple docstring"""
        self.engine.backward(loss ,**kwargs )
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
# training loop that works transparently under many training regimes.
class _SCREAMING_SNAKE_CASE( AcceleratedOptimizer ):
    def __init__( self ,optimizer ) -> str:
        """simple docstring"""
        super().__init__(optimizer ,device_placement=False ,scaler=None )
        self.__has_overflow__ = hasattr(self.optimizer ,'''overflow''' )
    def zero_grad( self ,set_to_none=None ) -> Union[str, Any]:
        """simple docstring"""
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
    def step( self ) -> Union[str, Any]:
        """simple docstring"""
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
    @property
    def step_was_skipped( self ) -> Dict:
        """simple docstring"""
        if self.__has_overflow__:
            return self.optimizer.overflow
        return False
class _SCREAMING_SNAKE_CASE( AcceleratedScheduler ):
    def __init__( self ,scheduler ,optimizers ) -> Optional[int]:
        """simple docstring"""
        super().__init__(scheduler ,optimizers )
    def step( self ) -> Optional[int]:
        """simple docstring"""
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class _SCREAMING_SNAKE_CASE:
    def __init__( self ,params ,lr=0.0_0_1 ,weight_decay=0 ,**kwargs ) -> List[str]:
        """simple docstring"""
        self.params = params
        self.lr = lr
        self.weight_decay = weight_decay
        self.kwargs = kwargs
class _SCREAMING_SNAKE_CASE:
    def __init__( self ,optimizer ,total_num_steps=None ,warmup_num_steps=0 ,**kwargs ) -> str:
        """simple docstring"""
        self.optimizer = optimizer
        self.total_num_steps = total_num_steps
        self.warmup_num_steps = warmup_num_steps
        self.kwargs = kwargs
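The point of the no-op wrappers is that a single training loop works with and without DeepSpeed; a sketch (names illustrative, not the accelerate API):

def train_step(model, batch, loss_fn, accelerator_backward, optimizer, scheduler):
    loss = loss_fn(model(batch["x"]), batch["y"])
    accelerator_backward(loss)  # under DeepSpeed this ends up in engine.backward + engine.step
    optimizer.step()            # no-op under DeepSpeed (already stepped), a real step otherwise
    scheduler.step()            # likewise a no-op when DeepSpeed owns the lr scheduler
    optimizer.zero_grad()       # likewise
    return loss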
| 239
| 0
|
'''simple docstring'''
from __future__ import annotations
import queue
class TreeNode :
    def __init__(self , data : int ):
        self.data = data
        self.left = None
        self.right = None
def build_tree() -> TreeNode:
    '''simple docstring'''
    print("\n********Press N to stop entering at any point of time********\n" )
    check = input("Enter the value of the root node: " ).strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check ) )
    q.put(tree_node )
    while not q.empty():
        node_found = q.get()
        msg = f"""Enter the left node of {node_found.data}: """
        check = input(msg ).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check ) )
        node_found.left = left_node
        q.put(left_node )
        msg = f"""Enter the right node of {node_found.data}: """
        check = input(msg ).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check ) )
        node_found.right = right_node
        q.put(right_node )
    raise
def pre_order( node : TreeNode ) -> None:
    '''simple docstring'''
    if not isinstance(node , TreeNode ) or not node:
        return
    print(node.data , end="," )
    pre_order(node.left )
    pre_order(node.right )
def in_order( node : TreeNode ) -> None:
    '''simple docstring'''
    if not isinstance(node , TreeNode ) or not node:
        return
    in_order(node.left )
    print(node.data , end="," )
    in_order(node.right )
def post_order( node : TreeNode ) -> None:
    '''simple docstring'''
    if not isinstance(node , TreeNode ) or not node:
        return
    post_order(node.left )
    post_order(node.right )
    print(node.data , end="," )
def level_order( node : TreeNode ) -> None:
    '''simple docstring'''
    if not isinstance(node , TreeNode ) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node )
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data , end="," )
        if node_dequeued.left:
            q.put(node_dequeued.left )
        if node_dequeued.right:
            q.put(node_dequeued.right )
def level_order_actual( node : TreeNode ) -> None:
    '''simple docstring'''
    if not isinstance(node , TreeNode ) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node )
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data , end="," )
            if node_dequeued.left:
                list_.append(node_dequeued.left )
            if node_dequeued.right:
                list_.append(node_dequeued.right )
        print()
        for node in list_:
            q.put(node )
def pre_order_iter( node : TreeNode ) -> None:
    '''simple docstring'''
    if not isinstance(node , TreeNode ) or not node:
        return
    stack = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data , end="," )
            stack.append(n )
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right
def in_order_iter( node : TreeNode ) -> None:
    '''simple docstring'''
    if not isinstance(node , TreeNode ) or not node:
        return
    stack = []
    n = node
    while n or stack:
        while n:
            stack.append(n )
            n = n.left
        n = stack.pop()
        print(n.data , end="," )
        n = n.right
def post_order_iter( node : TreeNode ) -> None:
    '''simple docstring'''
    if not isinstance(node , TreeNode ) or not node:
        return
    stacka , stackb = [], []
    n = node
    stacka.append(n )
    while stacka:  # to find the reversed order of post order, store it in stackb
        n = stacka.pop()
        if n.left:
            stacka.append(n.left )
        if n.right:
            stacka.append(n.right )
        stackb.append(n )
    while stackb:  # pop up from stackb will be the post order
        print(stackb.pop().data , end="," )
def prompt( s : str = "" , width : int = 50 , char : str = "*" ) -> str:
    '''simple docstring'''
    if not s:
        return "\n" + width * char
    left , extra = divmod(width - len(s ) - 2 , 2 )
    return f"""{left * char} {s} {(left + extra) * char}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt('Binary Tree Traversals'))
    node: TreeNode = build_tree()
print(prompt('Pre Order Traversal'))
pre_order(node)
print(prompt() + '\n')
print(prompt('In Order Traversal'))
in_order(node)
print(prompt() + '\n')
print(prompt('Post Order Traversal'))
post_order(node)
print(prompt() + '\n')
print(prompt('Level Order Traversal'))
level_order(node)
print(prompt() + '\n')
print(prompt('Actual Level Order Traversal'))
level_order_actual(node)
print('*' * 50 + '\n')
print(prompt('Pre Order Traversal - Iteration Version'))
pre_order_iter(node)
print(prompt() + '\n')
print(prompt('In Order Traversal - Iteration Version'))
in_order_iter(node)
print(prompt() + '\n')
print(prompt('Post Order Traversal - Iteration Version'))
post_order_iter(node)
print(prompt())
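The traversals also work on a tree built in code, which is handy for non-interactive testing; a small sketch (run this instead of the interactive block above):

#         1
#        / \
#       2   3
root = TreeNode(1)
root.left = TreeNode(2)
root.right = TreeNode(3)
pre_order(root)    # prints: 1,2,3,
print()
in_order(root)     # prints: 2,1,3,
print()
post_order(root)   # prints: 2,3,1,
print()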
| 1
|
'''simple docstring'''
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class __A ( BertTokenizationTest ):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True
@slow
def _lowercase (self : int ):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased" )
        text = tokenizer.encode("sequence builders" , add_special_tokens=False )
        text_a = tokenizer.encode("multi-sequence build" , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
| 1
| 1
|
'''simple docstring'''
def add (first : int , second : int ) -> int:
    while second != 0:
        # carry holds the common set bits of first and second
        carry = first & second
        # sum of bits where at least one of the two operands has a zero
        first ^= second
        # shift the carry left by one so that adding it gives the required sum
        second = carry << 1
    return first
if __name__ == "__main__":
import doctest
doctest.testmod()
    first = int(input('Enter the first number: ').strip())
    second = int(input('Enter the second number: ').strip())
print(f'''{add(first, second) = }''')
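A worked trace of the carry loop, in binary, plus a quick check. Note the loop assumes non-negative operands; Python's unbounded integers make it non-terminating for negatives.

# add(5, 3):
#   first=101,  second=011  -> carry=001, first=110,  second=010
#   first=110,  second=010  -> carry=010, first=100,  second=100
#   first=100,  second=100  -> carry=100, first=000,  second=1000
#   first=000,  second=1000 -> carry=0,   first=1000, second=0    -> returns 8
assert add(5, 3) == 8
assert add(0, 7) == 7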
| 368
|
'''simple docstring'''
import sys
N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def str_eval (s : str ) -> int:
    # product of the digit characters in s
    product = 1
    for digit in s:
        product *= int(digit )
    return product
def solution (n : str = N ) -> int:
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n ) - 13:
        if int(n[cur_index] ) >= int(substr[0] ):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product , str_eval(substr ) )
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
if __name__ == "__main__":
print(f'''{solution() = }''')
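A brute-force cross-check for the incremental search above; O(13 * len(n)) but fine as a one-off sanity test, and it makes the boundary behaviour of solution() easy to audit:

def solution_brute (n : str = N ) -> int:
    # product of every window of 13 adjacent digits, take the max
    return max(str_eval(n[i : i + 13] ) for i in range(len(n ) - 12 ) )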
| 294
| 0
|
"""simple docstring"""
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def get_args ():
    parser = argparse.ArgumentParser()
    parser.add_argument('''--model_ckpt''' , type=str , default='''microsoft/unixcoder-base-nine''' )
    parser.add_argument('''--num_epochs''' , type=int , default=5 )
    parser.add_argument('''--batch_size''' , type=int , default=6 )
    parser.add_argument('''--gradient_accumulation_steps''' , type=int , default=1 )
    parser.add_argument('''--freeze''' , type=bool , default=True )
    parser.add_argument('''--learning_rate''' , type=float , default=5E-4 )
    parser.add_argument('''--seed''' , type=int , default=0 )
    parser.add_argument('''--lr_scheduler_type''' , type=str , default='''cosine''' )
    parser.add_argument('''--num_warmup_steps''' , type=int , default=10 )
    parser.add_argument('''--weight_decay''' , type=float , default=0.01 )
    parser.add_argument('''--output_dir''' , type=str , default='''./results''' )
    return parser.parse_args()
metric = load('accuracy')
def compute_metrics (eval_pred : Any ):
    predictions , labels = eval_pred
    predictions = np.argmax(predictions , axis=1 )
    return metric.compute(predictions=predictions , references=labels )
class CustomCallback ( TrainerCallback ):
    '''simple docstring'''
    def __init__( self , trainer ) -> None:
        super().__init__()
        self._trainer = trainer
    def on_evaluate( self , args , state , control , **kwargs ) -> Dict:
        if control.should_evaluate:
            control_copy = deepcopy(control )
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix='''train''' )
            return control_copy
def main ():
    args = get_args()
    set_seed(args.seed )
    dataset = load_dataset('''codeparrot/codecomplex''' , split='''train''' )
    train_test = dataset.train_test_split(test_size=0.2 )
    test_validation = train_test['''test'''].train_test_split(test_size=0.5 )
    train_test_validation = DatasetDict(
        {
            '''train''': train_test['''train'''],
            '''test''': test_validation['''train'''],
            '''valid''': test_validation['''test'''],
        } )
    print('''Loading tokenizer and model''' )
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt )
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 )
    model.config.pad_token_id = model.config.eos_token_id
    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False
    labels = ClassLabel(num_classes=7 , names=list(set(train_test_validation['''train''']['''complexity'''] ) ) )
    def tokenize(example ):
        inputs = tokenizer(example['''src'''] , truncation=True , max_length=1024 )
        label = labels.str2int(example['''complexity'''] )
return {
"input_ids": inputs["input_ids"],
"attention_mask": inputs["attention_mask"],
"label": label,
}
    tokenized_datasets = train_test_validation.map(
        tokenize , batched=True , remove_columns=train_test_validation['''train'''].column_names , )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer )
    training_args = TrainingArguments(
        output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy='''epoch''' , save_strategy='''epoch''' , logging_strategy='''epoch''' , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.01 , metric_for_best_model='''accuracy''' , run_name='''complexity-java''' , report_to='''wandb''' , )
    trainer = Trainer(
        model=model , args=training_args , train_dataset=tokenized_datasets['''train'''] , eval_dataset=tokenized_datasets['''valid'''] , tokenizer=tokenizer , data_collator=data_collator , compute_metrics=compute_metrics , )
    print('''Training...''' )
    trainer.add_callback(CustomCallback(trainer ) )
trainer.train()
if __name__ == "__main__":
main()
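The ClassLabel mapping used in tokenize() above, in isolation (toy label names, not the real codeparrot/codecomplex classes):

from datasets import ClassLabel

toy = ClassLabel(num_classes=3, names=["constant", "linear", "quadratic"])
assert toy.str2int("linear") == 1   # name -> integer id, as stored in the "label" column
assert toy.int2str(1) == "linear"   # and back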
| 263
|
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __SCREAMING_SNAKE_CASE ( ProcessorMixin ):
snake_case_ = ["""image_processor""", """tokenizer"""]
snake_case_ = """CLIPImageProcessor"""
snake_case_ = ("""XLMRobertaTokenizer""", """XLMRobertaTokenizerFast""")
    def __init__( self : List[Any] , image_processor=None , tokenizer=None , **kwargs : Any ) -> Tuple:
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , FutureWarning , )
            feature_extractor = kwargs.pop('''feature_extractor''' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(image_processor , tokenizer )
    def __call__( self : Union[str, Any] , text=None , images=None , return_tensors=None , **kwargs : str ) -> Tuple:
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding['''pixel_values'''] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode( self : int , *args , **kwargs ) -> Dict:
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self : List[Any] , *args , **kwargs ) -> Union[str, Any]:
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self : List[Any] ) -> Tuple:
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
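The dispatch in __call__ above, reduced to plain dicts so it can be tested without a model (stand-in outputs, not real tokenizer/image-processor results):

def call(text=None, images=None):
    if text is None and images is None:
        raise ValueError("You have to specify either text or images. Both cannot be none.")
    encoding = {"input_ids": [[1, 2, 3]]} if text is not None else None
    image_features = {"pixel_values": [[0.0]]} if images is not None else None
    if text is not None and images is not None:
        encoding["pixel_values"] = image_features["pixel_values"]
        return encoding
    return encoding if text is not None else image_features

assert set(call(text="a cat", images="img")) == {"input_ids", "pixel_values"}
assert set(call(text="a cat")) == {"input_ids"}
assert set(call(images="img")) == {"pixel_values"}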
| 152
| 0
|
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece_no_bos.model""")
@require_sentencepiece
@require_tokenizers
class UpperCamelCase ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ):
        """simple docstring"""
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
@cached_property
    def _large_tokenizer( self ):
        """simple docstring"""
        return PegasusTokenizer.from_pretrained('google/pegasus-large')
    def get_tokenizer( self , **kwargs ):
        """simple docstring"""
        return PegasusTokenizer.from_pretrained(self.tmpdirname , **kwargs)
    def get_input_output_texts( self , tokenizer ):
        """simple docstring"""
        return ("This is a test", "This is a test")
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Optional[int] = '</s>'
a : Tuple = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase_) , UpperCAmelCase_)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase_) , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0] , '<pad>')
        self.assertEqual(vocab_keys[1] , '</s>')
        self.assertEqual(vocab_keys[-1] , 'v')
        self.assertEqual(len(vocab_keys) , 1_1_0_3)
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_1_0_3)
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            'Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'
            ' </s> <pad> <pad> <pad>'
        )
        rust_ids = rust_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids , rust_ids)
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = '<mask_1> To ensure a <mask_2> flow of bank resolutions.'
        desired_result = [2, 4_1_3, 6_1_5, 1_1_4, 3, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1]
        ids = tokenizer([raw_input_str] , return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result , ids)
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
        tokenizer = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6_1_0_3
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 1_0_3
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_0_5
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1_0_2_4
        raw_input_str = 'To ensure a smooth flow of bank resolutions.'
        desired_result = [4_1_3, 6_1_5, 1_1_4, 2_2_9_1, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1]
        ids = tokenizer([raw_input_str] , return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result , ids)
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
        src_texts = ['This is going to be way too long.' * 1_5_0, 'short example']
        tgt_texts = ['not super long but more than 5 tokens', 'tiny']
        batch = self._large_tokenizer(src_texts , padding=True , truncation=True , return_tensors='pt')
        targets = self._large_tokenizer(
            text_target=tgt_texts , max_length=5 , padding=True , truncation=True , return_tensors='pt')
        assert batch.input_ids.shape == (2, 1_0_2_4)
        assert batch.attention_mask.shape == (2, 1_0_2_4)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.
@slow
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
a : Tuple = {'input_ids': [[3_8_9_7_9, 1_4_3, 1_8_4_8_5, 6_0_6, 1_3_0, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 5_4_1_8_9, 1_1_2_9, 1_1_1, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 9_1_1_4, 1_4_7_8_7, 1_2_1, 1_3_2_4_9, 1_5_8, 5_9_2, 9_5_6, 1_2_1, 1_4_6_2_1, 3_1_5_7_6, 1_4_3, 6_2_6_1_3, 1_0_8, 9_6_8_8, 9_3_0, 4_3_4_3_0, 1_1_5_6_2, 6_2_6_1_3, 3_0_4, 1_0_8, 1_1_4_4_3, 8_9_7, 1_0_8, 9_3_1_4, 1_7_4_1_5, 6_3_3_9_9, 1_0_8, 1_1_4_4_3, 7_6_1_4, 1_8_3_1_6, 1_1_8, 4_2_8_4, 7_1_4_8, 1_2_4_3_0, 1_4_3, 1_4_0_0, 2_5_7_0_3, 1_5_8, 1_1_1, 4_2_8_4, 7_1_4_8, 1_1_7_7_2, 1_4_3, 2_1_2_9_7, 1_0_6_4, 1_5_8, 1_2_2, 2_0_4, 3_5_0_6, 1_7_5_4, 1_1_3_3, 1_4_7_8_7, 1_5_8_1, 1_1_5, 3_3_2_2_4, 4_4_8_2, 1_1_1, 1_3_5_5, 1_1_0, 2_9_1_7_3, 3_1_7, 5_0_8_3_3, 1_0_8, 2_0_1_4_7, 9_4_6_6_5, 1_1_1, 7_7_1_9_8, 1_0_7, 1], [1_1_0, 6_2_6_1_3, 1_1_7, 6_3_8, 1_1_2, 1_1_3_3, 1_2_1, 2_0_0_9_8, 1_3_5_5, 7_9_0_5_0, 1_3_8_7_2, 1_3_5, 1_5_9_6, 5_3_5_4_1, 1_3_5_2, 1_4_1, 1_3_0_3_9, 5_5_4_2, 1_2_4, 3_0_2, 5_1_8, 1_1_1, 2_6_8, 2_9_5_6, 1_1_5, 1_4_9, 4_4_2_7, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_3_9, 1_2_3_5, 2_7_9_9, 1_8_2_8_9, 1_7_7_8_0, 2_0_4, 1_0_9, 9_4_7_4, 1_2_9_6, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase_ , model_name='google/bigbird-pegasus-large-arxiv' , revision='ba85d0851d708441f91440d509690f1ab6353415' , )
@require_sentencepiece
@require_tokenizers
class UpperCamelCase ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ):
        """simple docstring"""
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB , offset=0 , mask_token_sent=None , mask_token='[MASK]')
        tokenizer.save_pretrained(self.tmpdirname)
@cached_property
    def _large_tokenizer( self ):
        """simple docstring"""
        return PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv')
    def get_tokenizer( self , **kwargs ):
        """simple docstring"""
        return PegasusTokenizer.from_pretrained(self.tmpdirname , **kwargs)
    def get_input_output_texts( self , tokenizer ):
        """simple docstring"""
        return ("This is a test", "This is a test")
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            'Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'
            ' <pad> <pad> <pad>'
        )
        rust_ids = rust_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids , rust_ids)
@require_torch
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
        src_texts = ['This is going to be way too long.' * 1_0_0_0, 'short example']
        tgt_texts = ['not super long but more than 5 tokens', 'tiny']
        batch = self._large_tokenizer(src_texts , padding=True , truncation=True , return_tensors='pt')
        targets = self._large_tokenizer(
            text_target=tgt_texts , max_length=5 , padding=True , truncation=True , return_tensors='pt')
        assert batch.input_ids.shape == (2, 4_0_9_6)
        assert batch.attention_mask.shape == (2, 4_0_9_6)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
        raw_input_str = (
            'This is an example string that is used to test the original TF implementation against the HF'
            ' implementation'
        )
        ids = self._large_tokenizer(raw_input_str).input_ids
        self.assertListEqual(
            ids , [1_8_2, 1_1_7, 1_4_2, 5_8_7, 4_2_1_1, 1_2_0, 1_1_7, 2_6_3, 1_1_2, 8_0_4, 1_0_9, 8_5_6, 2_5_0_1_6, 3_1_3_7, 4_6_4, 1_0_9, 2_6_9_5_5, 3_1_3_7, 1] , )
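The offset arithmetic asserted in the tests above, spelled out as a sketch of the id layout (not the tokenizer implementation): Pegasus reserves the first `offset` ids for special and mask tokens, so a raw SentencePiece piece id maps to `sp_id + offset`.

offset = 103                   # ids 0..102 are reserved (pad, eos, mask_1, mask_2, ...)
def sp_to_hf(sp_id: int) -> int:
    return sp_id + offset
assert sp_to_hf(2) == 105      # matches unk_token_id == offset + 2 in the test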
| 354
|
'''simple docstring'''
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTModelTester :
"""simple docstring"""
    def __init__( self , parent , batch_size=1_3 , image_size=3_0 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=1_0 , initializer_range=0.02 , scope=None , encoder_stride=2 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        """simple docstring"""
        return ViTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
    def create_and_check_model( self , config , pixel_values , labels ):
        """simple docstring"""
        model = ViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_image_modeling( self , config , pixel_values , labels ):
        """simple docstring"""
        model = ViTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size))
        # test greyscale images
        config.num_channels = 1
        model = ViTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size))
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        """simple docstring"""
        config.num_labels = self.type_sequence_label_size
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values , labels=labels)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class UpperCamelCase ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = ViTModelTester(self)
        self.config_tester = ConfigTester(self , config_class=ViTConfig , has_text_modality=False , hidden_size=3_7)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds')
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear))
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names)
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img( ) -> Optional[Any]:
"""simple docstring"""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_image_processor( self ):
        """simple docstring"""
        return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224') if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
        model = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224').to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1_0_0_0))
        self.assertEqual(outputs.logits.shape , expected_shape)
        expected_slice = torch.tensor([-0.27_44, 0.82_15, -0.08_36]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4))
@slow
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
        model = ViTModel.from_pretrained('facebook/dino-vits8').to(torch_device)
        image_processor = ViTImageProcessor.from_pretrained('facebook/dino-vits8' , size=4_8_0)
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt')
        pixel_values = inputs.pixel_values.to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values , interpolate_pos_encoding=True)
        # verify the logits
        expected_shape = torch.Size((1, 3_6_0_1, 3_8_4))
        self.assertEqual(outputs.last_hidden_state.shape , expected_shape)
        expected_slice = torch.tensor(
            [[4.23_40, 4.39_06, -6.66_92], [4.54_63, 1.89_28, -6.72_57], [4.44_29, 0.84_96, -5.85_85]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1e-4))
@slow
@require_accelerate
@require_torch_gpu
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
        model = ViTModel.from_pretrained('facebook/dino-vits8' , torch_dtype=torch.float16 , device_map='auto')
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt')
        pixel_values = inputs.pixel_values.to(torch_device)
        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            outputs = model(pixel_values)
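The sequence length the tester relies on, computed directly: ViT cuts the image into non-overlapping patches and prepends one [CLS] token.

image_size, patch_size = 30, 2                 # the tester's defaults
num_patches = (image_size // patch_size) ** 2  # 15 * 15 = 225
seq_length = num_patches + 1                   # + [CLS]
assert (num_patches, seq_length) == (225, 226)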
| 345
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_funnel''': ['''FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FunnelConfig'''],
'''convert_funnel_original_tf_checkpoint_to_pytorch''': [],
'''tokenization_funnel''': ['''FunnelTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_funnel_fast'''] = ['''FunnelTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_funnel'''] = [
'''FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FunnelBaseModel''',
'''FunnelForMaskedLM''',
'''FunnelForMultipleChoice''',
'''FunnelForPreTraining''',
'''FunnelForQuestionAnswering''',
'''FunnelForSequenceClassification''',
'''FunnelForTokenClassification''',
'''FunnelModel''',
'''FunnelPreTrainedModel''',
'''load_tf_weights_in_funnel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_funnel'''] = [
'''TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFFunnelBaseModel''',
'''TFFunnelForMaskedLM''',
'''TFFunnelForMultipleChoice''',
'''TFFunnelForPreTraining''',
'''TFFunnelForQuestionAnswering''',
'''TFFunnelForSequenceClassification''',
'''TFFunnelForTokenClassification''',
'''TFFunnelModel''',
'''TFFunnelPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
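A simplified sketch of the lazy-import idea this init relies on (illustrative only, not transformers' actual _LazyModule): attribute access triggers the submodule import.

import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # exported attribute -> submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }
    def __getattr__(self, attr: str):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(module, attr)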
| 18
|
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
('''align''', '''EfficientNetImageProcessor'''),
('''beit''', '''BeitImageProcessor'''),
('''bit''', '''BitImageProcessor'''),
('''blip''', '''BlipImageProcessor'''),
('''blip-2''', '''BlipImageProcessor'''),
('''bridgetower''', '''BridgeTowerImageProcessor'''),
('''chinese_clip''', '''ChineseCLIPImageProcessor'''),
('''clip''', '''CLIPImageProcessor'''),
('''clipseg''', '''ViTImageProcessor'''),
('''conditional_detr''', '''ConditionalDetrImageProcessor'''),
('''convnext''', '''ConvNextImageProcessor'''),
('''convnextv2''', '''ConvNextImageProcessor'''),
('''cvt''', '''ConvNextImageProcessor'''),
('''data2vec-vision''', '''BeitImageProcessor'''),
('''deformable_detr''', '''DeformableDetrImageProcessor'''),
('''deit''', '''DeiTImageProcessor'''),
('''deta''', '''DetaImageProcessor'''),
('''detr''', '''DetrImageProcessor'''),
('''dinat''', '''ViTImageProcessor'''),
('''donut-swin''', '''DonutImageProcessor'''),
('''dpt''', '''DPTImageProcessor'''),
('''efficientformer''', '''EfficientFormerImageProcessor'''),
('''efficientnet''', '''EfficientNetImageProcessor'''),
('''flava''', '''FlavaImageProcessor'''),
('''focalnet''', '''BitImageProcessor'''),
('''git''', '''CLIPImageProcessor'''),
('''glpn''', '''GLPNImageProcessor'''),
('''groupvit''', '''CLIPImageProcessor'''),
('''imagegpt''', '''ImageGPTImageProcessor'''),
('''instructblip''', '''BlipImageProcessor'''),
('''layoutlmv2''', '''LayoutLMv2ImageProcessor'''),
('''layoutlmv3''', '''LayoutLMv3ImageProcessor'''),
('''levit''', '''LevitImageProcessor'''),
('''mask2former''', '''Mask2FormerImageProcessor'''),
('''maskformer''', '''MaskFormerImageProcessor'''),
('''mgp-str''', '''ViTImageProcessor'''),
('''mobilenet_v1''', '''MobileNetV1ImageProcessor'''),
('''mobilenet_v2''', '''MobileNetV2ImageProcessor'''),
        ('''mobilevit''', '''MobileViTImageProcessor'''),
('''mobilevitv2''', '''MobileViTImageProcessor'''),
('''nat''', '''ViTImageProcessor'''),
('''oneformer''', '''OneFormerImageProcessor'''),
('''owlvit''', '''OwlViTImageProcessor'''),
('''perceiver''', '''PerceiverImageProcessor'''),
('''pix2struct''', '''Pix2StructImageProcessor'''),
('''poolformer''', '''PoolFormerImageProcessor'''),
('''regnet''', '''ConvNextImageProcessor'''),
('''resnet''', '''ConvNextImageProcessor'''),
('''sam''', '''SamImageProcessor'''),
('''segformer''', '''SegformerImageProcessor'''),
('''swiftformer''', '''ViTImageProcessor'''),
('''swin''', '''ViTImageProcessor'''),
('''swin2sr''', '''Swin2SRImageProcessor'''),
('''swinv2''', '''ViTImageProcessor'''),
('''table-transformer''', '''DetrImageProcessor'''),
('''timesformer''', '''VideoMAEImageProcessor'''),
('''tvlt''', '''TvltImageProcessor'''),
('''upernet''', '''SegformerImageProcessor'''),
('''van''', '''ConvNextImageProcessor'''),
('''videomae''', '''VideoMAEImageProcessor'''),
('''vilt''', '''ViltImageProcessor'''),
('''vit''', '''ViTImageProcessor'''),
('''vit_hybrid''', '''ViTHybridImageProcessor'''),
('''vit_mae''', '''ViTImageProcessor'''),
('''vit_msn''', '''ViTImageProcessor'''),
('''xclip''', '''CLIPImageProcessor'''),
('''yolos''', '''YolosImageProcessor'''),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name( class_name ) -> Optional[Any]:
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name )
            module = importlib.import_module(f""".{module_name}""" , 'transformers.models' )
            try:
                return getattr(module , class_name )
            except AttributeError:
                continue
    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor , '__name__' , None ) == class_name:
            return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module('transformers' )
    if hasattr(main_module , class_name ):
        return getattr(main_module , class_name )
    return None
def get_image_processor_config( pretrained_model_name_or_path , cache_dir = None , force_download = False , resume_download = False , proxies = None , use_auth_token = None , revision = None , local_files_only = False , **kwargs , ) -> Tuple:
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path , IMAGE_PROCESSOR_NAME , cache_dir=cache_dir , force_download=force_download , resume_download=resume_download , proxies=proxies , use_auth_token=use_auth_token , revision=revision , local_files_only=local_files_only , )
if resolved_config_file is None:
logger.info(
'Could not locate the image processor configuration file, will try to use the model config instead.' )
return {}
    with open(resolved_config_file , encoding='utf-8' ) as reader:
        return json.load(reader )
class AutoImageProcessor :
def __init__( self : List[str] ):
raise EnvironmentError(
'AutoImageProcessor is designed to be instantiated '
'using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.' )
@classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES )
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        config = kwargs.pop('config' , None )
        trust_remote_code = kwargs.pop('trust_remote_code' , None )
        kwargs['_from_auto'] = True
        config_dict , _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path , **kwargs )
        image_processor_class = config_dict.get('image_processor_type' , None )
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get('auto_map' , {} ):
            image_processor_auto_map = config_dict['auto_map']['AutoImageProcessor']
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop('feature_extractor_type' , None )
            if feature_extractor_class is not None:
                logger.warning(
                    'Could not find image processor class in the image processor config or the model config. Loading'
                    ' based on pattern matching with the model\'s feature extractor configuration.' )
                image_processor_class = feature_extractor_class.replace('FeatureExtractor' ,'ImageProcessor' )
            if "AutoFeatureExtractor" in config_dict.get('auto_map' ,{} ):
                feature_extractor_auto_map = config_dict['auto_map']['AutoFeatureExtractor']
                image_processor_auto_map = feature_extractor_auto_map.replace('FeatureExtractor' ,'ImageProcessor' )
                logger.warning(
                    'Could not find image processor auto map in the image processor config or the model config.'
                    ' Loading based on pattern matching with the model\'s feature extractor configuration.' )
# If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config , PretrainedConfig ):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path , **kwargs )
            # It could be in `config.image_processor_type``
            image_processor_class = getattr(config , 'image_processor_type' , None )
            if hasattr(config , 'auto_map' ) and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map['AutoImageProcessor']
        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class )
        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config ) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code , pretrained_model_name_or_path , has_local_code , has_remote_code )
        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map , pretrained_model_name_or_path , **kwargs )
            kwargs.pop('code_revision' , None )
            if os.path.isdir(pretrained_model_name_or_path ):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict , **kwargs )
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict , **kwargs )
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config ) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config )]
            return image_processor_class.from_dict(config_dict , **kwargs )
raise ValueError(
F"""Unrecognized image processor in {pretrained_model_name_or_path}. Should have a """
F"""`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following """
F"""`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}""" )
    @staticmethod
    def register( config_class , image_processor_class ):
        IMAGE_PROCESSOR_MAPPING.register(config_class , image_processor_class )
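The resolution order from_pretrained implements above, as a checklist, followed by a typical call:

# 1. `image_processor_type` in the image processor config (preprocessor_config.json)
# 2. an `AutoImageProcessor` entry in `auto_map` (remote code, gated by trust_remote_code)
# 3. a legacy `feature_extractor_type`, rewritten "FeatureExtractor" -> "ImageProcessor"
# 4. the model config's `image_processor_type` / `auto_map`
# 5. finally, IMAGE_PROCESSOR_MAPPING keyed on the config class
#
# Typical usage (checkpoint name illustrative):
#     image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")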
| 89
| 0
|
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput (BaseOutput ):
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray
class snake_case_ (nn.Module ):
UpperCAmelCase__ : int
UpperCAmelCase__ : Tuple[int] = (1_6, 3_2, 9_6, 2_5_6)
UpperCAmelCase__ : jnp.dtype = jnp.floataa
def lowerCamelCase__( self :List[str] ) -> int:
a__ = nn.Conv(
self.block_out_channels[0] ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
a__ = []
for i in range(len(self.block_out_channels ) - 1 ):
a__ = self.block_out_channels[i]
a__ = self.block_out_channels[i + 1]
a__ = nn.Conv(
__snake_case ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
blocks.append(__snake_case )
a__ = nn.Conv(
__snake_case ,kernel_size=(3, 3) ,strides=(2, 2) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
blocks.append(__snake_case )
a__ = blocks
a__ = nn.Conv(
self.conditioning_embedding_channels ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
def __call__( self :Tuple ,__snake_case :List[str] ) -> Tuple:
a__ = self.conv_in(__snake_case )
a__ = nn.silu(__snake_case )
for block in self.blocks:
a__ = block(__snake_case )
a__ = nn.silu(__snake_case )
a__ = self.conv_out(__snake_case )
return embedding
@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)
    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]
    def setup(self) -> None:
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0],
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0],
            block_out_channels=self.conditioning_embedding_out_channels,
        )

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        controlnet_down_blocks = []

        output_channel = block_out_channels[0]

        controlnet_block = nn.Conv(
            output_channel,
            kernel_size=(1, 1),
            padding="VALID",
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )
        controlnet_down_blocks.append(controlnet_block)

        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    num_attention_heads=num_attention_heads[i],
                    add_downsample=not is_final_block,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    dtype=self.dtype,
                )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    add_downsample=not is_final_block,
                    dtype=self.dtype,
                )

            down_blocks.append(down_block)

            for _ in range(self.layers_per_block):
                controlnet_block = nn.Conv(
                    output_channel,
                    kernel_size=(1, 1),
                    padding="VALID",
                    kernel_init=nn.initializers.zeros_init(),
                    bias_init=nn.initializers.zeros_init(),
                    dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)

            if not is_final_block:
                controlnet_block = nn.Conv(
                    output_channel,
                    kernel_size=(1, 1),
                    padding="VALID",
                    kernel_init=nn.initializers.zeros_init(),
                    bias_init=nn.initializers.zeros_init(),
                    dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)

        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks

        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=mid_block_channel,
            dropout=self.dropout,
            num_attention_heads=num_attention_heads[-1],
            use_linear_projection=self.use_linear_projection,
            dtype=self.dtype,
        )
        self.controlnet_mid_block = nn.Conv(
            mid_block_channel,
            kernel_size=(1, 1),
            padding="VALID",
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )
    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        controlnet_cond,
        conditioning_scale: float = 1.0,
        return_dict: bool = True,
        train: bool = False,
    ) -> Union[FlaxControlNetOutput, Tuple]:
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond, axis=1)

        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
        sample += controlnet_cond

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        # 5. controlnet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)

        down_block_res_samples = controlnet_down_block_res_samples

        mid_block_res_sample = self.controlnet_mid_block(sample)

        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale

        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)

        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample
        )
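# Hedged sketch: the conditioning embedder above downsamples once per channel
# transition (three stride-2 convs for the default (16, 32, 96, 256) channels),
# which is why init_weights builds the conditioning image at sample_size * 8:
# a 512px condition image lands on the 64px latent grid.
def _demo_conditioning_downscale(height: int = 512) -> int:
    num_stride2_convs = len((16, 32, 96, 256)) - 1  # one stride-2 conv per transition
    return height // (2**num_stride2_convs)  # 512 -> 64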
| 109
|
values = {
0: '''0''',
1: '''1''',
2: '''2''',
3: '''3''',
4: '''4''',
5: '''5''',
6: '''6''',
7: '''7''',
8: '''8''',
9: '''9''',
10: '''a''',
11: '''b''',
12: '''c''',
13: '''d''',
14: '''e''',
15: '''f''',
}
def decimal_to_hexadecimal(decimal: float) -> str:
    """Convert an integer-valued decimal number to its prefixed hexadecimal string."""
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal
if __name__ == "__main__":
import doctest
doctest.testmod()
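# Quick sanity-check sketch: for the nonzero integers this converter accepts,
# its output should agree with Python's built-in hex().
def _demo_decimal_to_hexadecimal() -> None:
    for value in (17, 255, -256, 3.0):
        assert decimal_to_hexadecimal(value) == hex(int(value))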
| 109
| 1
|
def decimal_to_binary(num: int) -> str:
    """Convert an integer to its prefixed binary string."""
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if num == 0:
        return "0b0"
    negative = False
    if num < 0:
        negative = True
        num = -num
    binary: list[int] = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1
    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)
if __name__ == "__main__":
import doctest
doctest.testmod()
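# Quick sanity-check sketch: bin() produces the same "0b"/"-0b" prefixed strings
# for the integers this function accepts.
def _demo_decimal_to_binary() -> None:
    for value in (0, 2, 7, 35, -2):
        assert decimal_to_binary(value) == bin(value)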
| 39
|
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_lowercase : List[str] = "\\n@inproceedings{snover-etal-2006-study,\n title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",\n author = \"Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John\",\n booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",\n month = aug # \" 8-12\",\n year = \"2006\",\n address = \"Cambridge, Massachusetts, USA\",\n publisher = \"Association for Machine Translation in the Americas\",\n url = \"https://aclanthology.org/2006.amta-papers.25\",\n pages = \"223--231\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
_lowercase : Tuple = "\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n"
_lowercase : Optional[int] = "\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n 'score' (float): TER score (num_edits / sum_ref_lengths * 100)\n 'num_edits' (int): The cumulative number of edits\n 'ref_length' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}\n\n Example 2:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}\n\n Example 3:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}\n\n Example 4:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}\n\n Example 5:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... 
[\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class __magic_name__ ( datasets.Metric):
def SCREAMING_SNAKE_CASE_ ( self : int ):
if version.parse(scb.__version__ ) < version.parse("""1.4.12""" ):
raise ImportWarning(
"""To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"""
"""You can install it with `pip install \"sacrebleu>=1.4.12\"`.""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""http://www.cs.umd.edu/~snover/tercom/""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/mjpost/sacreBLEU#ter"""] , reference_urls=[
"""https://github.com/jhclark/tercom""",
] , )
def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : Any , lowercase_ : Tuple , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = False , ):
lowercase_ : int = len(references[0] )
if any(len(lowercase_ ) != references_per_prediction for refs in references ):
raise ValueError("""Sacrebleu requires the same number of references for each prediction""" )
lowercase_ : Optional[int] = [[refs[i] for refs in references] for i in range(lowercase_ )]
lowercase_ : List[str] = TER(
normalized=lowercase_ , no_punct=lowercase_ , asian_support=lowercase_ , case_sensitive=lowercase_ , )
lowercase_ : List[str] = sb_ter.corpus_score(lowercase_ , lowercase_ )
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 239
| 0
|
lowerCamelCase__ = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
def lowerCAmelCase__ ( ) -> None:
lowerCAmelCase__ : Any = input('Enter message: ' )
lowerCAmelCase__ : Tuple = input('Enter key [alphanumeric]: ' )
lowerCAmelCase__ : Optional[int] = input('Encrypt/Decrypt [e/d]: ' )
if mode.lower().startswith('e' ):
lowerCAmelCase__ : Tuple = 'encrypt'
lowerCAmelCase__ : Any = encrypt_message(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
elif mode.lower().startswith('d' ):
lowerCAmelCase__ : Any = 'decrypt'
lowerCAmelCase__ : List[str] = decrypt_message(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
print(F'''\n{mode.title()}ed message:''' )
print(SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
return translate_message(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 'encrypt' )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
return translate_message(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 'decrypt' )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
lowerCAmelCase__ : Optional[int] = []
lowerCAmelCase__ : List[Any] = 0
lowerCAmelCase__ : Tuple = key.upper()
for symbol in message:
lowerCAmelCase__ : int = LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(SCREAMING_SNAKE_CASE_ )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase__ : Tuple = 0
else:
translated.append(SCREAMING_SNAKE_CASE_ )
return "".join(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
main()
| 307
|
from __future__ import annotations


def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return
    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)


if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
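# Sanity-check sketch: the backtracking search enumerates the same k-subsets of
# {1, ..., n}, in the same lexicographic order, as itertools.combinations.
def _demo_generate_all_combinations() -> None:
    from itertools import combinations

    assert generate_all_combinations(4, 2) == [list(c) for c in combinations(range(1, 5), 2)]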
| 307
| 1
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _UpperCAmelCase ( _A , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : Tuple = TextToVideoSDPipeline
SCREAMING_SNAKE_CASE_ : Optional[Any] = TEXT_TO_IMAGE_PARAMS
SCREAMING_SNAKE_CASE_ : Optional[int] = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
SCREAMING_SNAKE_CASE_ : Optional[Any] = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback",
"callback_steps",
] )
def A ( self : str ) -> int:
torch.manual_seed(0 )
lowercase_ : List[str] = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''DownBlock3D''') , up_block_types=('''UpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''') , cross_attention_dim=32 , attention_head_dim=4 , )
lowercase_ : Union[str, Any] = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=A , set_alpha_to_one=A , )
torch.manual_seed(0 )
lowercase_ : str = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
lowercase_ : str = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='''gelu''' , projection_dim=5_12 , )
lowercase_ : Any = CLIPTextModel(A )
lowercase_ : Optional[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowercase_ : Tuple = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
}
return components
def A ( self : int , A : List[str] , A : List[str]=0 ) -> Any:
if str(A ).startswith('''mps''' ):
lowercase_ : Optional[Any] = torch.manual_seed(A )
else:
lowercase_ : List[Any] = torch.Generator(device=A ).manual_seed(A )
lowercase_ : Union[str, Any] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''pt''',
}
return inputs
def A ( self : str ) -> Optional[Any]:
lowercase_ : Tuple = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase_ : int = self.get_dummy_components()
lowercase_ : List[Any] = TextToVideoSDPipeline(**A )
lowercase_ : Optional[Any] = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
lowercase_ : int = self.get_dummy_inputs(A )
lowercase_ : Optional[Any] = '''np'''
lowercase_ : Tuple = sd_pipe(**A ).frames
lowercase_ : Any = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
lowercase_ : Tuple = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A ( self : int ) -> int:
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=A , expected_max_diff=3e-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def A ( self : List[str] ) -> List[str]:
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=A , expected_max_diff=1e-2 )
@unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' )
def A ( self : Dict ) -> List[Any]:
pass
@unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' )
def A ( self : Tuple ) -> Optional[Any]:
pass
@unittest.skip(reason='''`num_images_per_prompt` argument is not supported for this pipeline.''' )
def A ( self : Optional[Any] ) -> Tuple:
pass
def A ( self : List[str] ) -> Tuple:
return super().test_progress_bar()
@slow
@skip_mps
class _UpperCAmelCase ( unittest.TestCase ):
def A ( self : Any ) -> Union[str, Any]:
lowercase_ : Optional[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy''' )
lowercase_ : List[str] = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''' )
lowercase_ : List[str] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
lowercase_ : List[str] = pipe.to('''cuda''' )
lowercase_ : List[str] = '''Spiderman is surfing'''
lowercase_ : List[Any] = torch.Generator(device='''cpu''' ).manual_seed(0 )
lowercase_ : int = pipe(A , generator=A , num_inference_steps=25 , output_type='''pt''' ).frames
lowercase_ : Union[str, Any] = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
def A ( self : Any ) -> Dict:
lowercase_ : int = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy''' )
lowercase_ : int = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''' )
lowercase_ : List[Any] = pipe.to('''cuda''' )
lowercase_ : Optional[Any] = '''Spiderman is surfing'''
lowercase_ : List[str] = torch.Generator(device='''cpu''' ).manual_seed(0 )
lowercase_ : Union[str, Any] = pipe(A , generator=A , num_inference_steps=2 , output_type='''pt''' ).frames
lowercase_ : Optional[Any] = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
| 33
|
"""simple docstring"""
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
logger = logging.getLogger(__name__)
class UpperCamelCase ( snake_case_ ):
def __init__( self : Optional[Any] , UpperCAmelCase__ : Optional[int]=-1 ) -> Tuple:
# in NER datasets, the last column is usually reserved for NER label
_a : Optional[int] = label_idx
def _lowercase ( self : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[Split, str] ) -> List[InputExample]:
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
_a : Any = mode.value
_a : Optional[int] = os.path.join(UpperCAmelCase__ , f"""{mode}.txt""" )
_a : int = 1
_a : int = []
with open(UpperCAmelCase__ , encoding="""utf-8""" ) as f:
_a : str = []
_a : str = []
for line in f:
if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=UpperCAmelCase__ , labels=UpperCAmelCase__ ) )
guid_index += 1
_a : List[str] = []
_a : str = []
else:
_a : List[Any] = line.split(""" """ )
words.append(splits[0] )
if len(UpperCAmelCase__ ) > 1:
labels.append(splits[self.label_idx].replace("""\n""" , """""" ) )
else:
# Examples could have no label for mode = "test"
labels.append("""O""" )
if words:
examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=UpperCAmelCase__ , labels=UpperCAmelCase__ ) )
return examples
def _lowercase ( self : Optional[Any] , UpperCAmelCase__ : TextIO , UpperCAmelCase__ : TextIO , UpperCAmelCase__ : List ) -> Union[str, Any]:
_a : List[str] = 0
for line in test_input_reader:
if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
writer.write(UpperCAmelCase__ )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
_a : int = line.split()[0] + """ """ + preds_list[example_id].pop(0 ) + """\n"""
writer.write(UpperCAmelCase__ )
else:
logger.warning("""Maximum sequence length exceeded: No prediction for '%s'.""" , line.split()[0] )
def _lowercase ( self : List[Any] , UpperCAmelCase__ : str ) -> List[str]:
if path:
with open(UpperCAmelCase__ , """r""" ) as f:
_a : List[Any] = f.read().splitlines()
if "O" not in labels:
_a : Union[str, Any] = ["""O"""] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class UpperCamelCase ( snake_case_ ):
def __init__( self : Union[str, Any] ) -> List[str]:
        # in the CoNLL-2003 dataset, the chunk column is second-to-last
super().__init__(label_idx=-2 )
def _lowercase ( self : List[Any] , UpperCAmelCase__ : str ) -> List[str]:
if path:
with open(UpperCAmelCase__ , """r""" ) as f:
_a : Optional[int] = f.read().splitlines()
if "O" not in labels:
_a : Optional[Any] = ["""O"""] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class UpperCamelCase ( snake_case_ ):
def _lowercase ( self : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[Split, str] ) -> List[InputExample]:
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
_a : List[Any] = mode.value
_a : Union[str, Any] = os.path.join(UpperCAmelCase__ , f"""{mode}.txt""" )
_a : List[str] = 1
_a : Optional[Any] = []
with open(UpperCAmelCase__ , encoding="""utf-8""" ) as f:
for sentence in parse_incr(UpperCAmelCase__ ):
_a : List[Any] = []
_a : Any = []
for token in sentence:
words.append(token["""form"""] )
labels.append(token["""upos"""] )
assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ )
if words:
examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=UpperCAmelCase__ , labels=UpperCAmelCase__ ) )
guid_index += 1
return examples
def _lowercase ( self : Tuple , UpperCAmelCase__ : TextIO , UpperCAmelCase__ : TextIO , UpperCAmelCase__ : List ) -> Dict:
_a : Optional[Any] = 0
for sentence in parse_incr(UpperCAmelCase__ ):
_a : List[str] = preds_list[example_id]
_a : str = """"""
for token in sentence:
out += f"""{token['form']} ({token['upos']}|{s_p.pop(0 )}) """
out += "\n"
writer.write(UpperCAmelCase__ )
example_id += 1
def _lowercase ( self : List[str] , UpperCAmelCase__ : str ) -> List[str]:
if path:
with open(UpperCAmelCase__ , """r""" ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
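# Sketch of the CoNLL-style format the reader above expects: one "token label"
# pair per line, blank lines separating sentences; a tiny in-memory example.
def _demo_conll_parse() -> None:
    raw = "EU B-ORG\nrejects O\n\nGerman B-MISC\n"
    sentences, words, labels = [], [], []
    for line in raw.splitlines():
        if not line.strip():
            if words:
                sentences.append((words, labels))
                words, labels = [], []
        else:
            token, label = line.split(" ")
            words.append(token)
            labels.append(label)
    if words:
        sentences.append((words, labels))
    assert sentences[0] == (["EU", "rejects"], ["B-ORG", "O"])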
| 294
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/trocr-base-handwritten''': (
'''https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'''
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self,
        vocab_size=50_265,
        d_model=1_024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4_096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
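# Hedged sketch: `attribute_map` lets generic code read decoder hyperparameters
# through the common config names (assumes the class above is transformers'
# TrOCRConfig, whose aliasing is provided by PretrainedConfig).
def _demo_trocr_config() -> None:
    config = TrOCRConfig(d_model=256, decoder_layers=6)
    assert config.hidden_size == 256  # aliased to d_model
    assert config.num_hidden_layers == 6  # aliased to decoder_layers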
| 351
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class ConvNextImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)
        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __snake_case( self : Optional[Any] , _UpperCamelCase : np.ndarray , _UpperCamelCase : Dict[str, int] , _UpperCamelCase : float , _UpperCamelCase : PILImageResampling = PILImageResampling.BICUBIC , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : Optional[int] , ) -> np.ndarray:
'''simple docstring'''
SCREAMING_SNAKE_CASE = get_size_dict(_UpperCamelCase , default_to_square=_UpperCamelCase )
if "shortest_edge" not in size:
raise ValueError(F"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}" )
SCREAMING_SNAKE_CASE = size["shortest_edge"]
if shortest_edge < 384:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
SCREAMING_SNAKE_CASE = int(shortest_edge / crop_pct )
SCREAMING_SNAKE_CASE = get_resize_output_image_size(_UpperCamelCase , size=_UpperCamelCase , default_to_square=_UpperCamelCase )
SCREAMING_SNAKE_CASE = resize(image=_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=_UpperCamelCase , size=(shortest_edge, shortest_edge) , data_format=_UpperCamelCase , **_UpperCamelCase )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
_UpperCamelCase , size=(shortest_edge, shortest_edge) , resample=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )
def __snake_case( self : int , _UpperCamelCase : np.ndarray , _UpperCamelCase : Union[int, float] , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : Optional[Any] , ) -> List[Any]:
'''simple docstring'''
return rescale(_UpperCamelCase , scale=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )
def __snake_case( self : Tuple , _UpperCamelCase : np.ndarray , _UpperCamelCase : Union[float, List[float]] , _UpperCamelCase : Union[float, List[float]] , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : List[Any] , ) -> np.ndarray:
'''simple docstring'''
return normalize(_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )
def __snake_case( self : List[str] , _UpperCamelCase : ImageInput , _UpperCamelCase : bool = None , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : float = None , _UpperCamelCase : PILImageResampling = None , _UpperCamelCase : bool = None , _UpperCamelCase : float = None , _UpperCamelCase : bool = None , _UpperCamelCase : Optional[Union[float, List[float]]] = None , _UpperCamelCase : Optional[Union[float, List[float]]] = None , _UpperCamelCase : Optional[Union[str, TensorType]] = None , _UpperCamelCase : ChannelDimension = ChannelDimension.FIRST , **_UpperCamelCase : Any , ) -> PIL.Image.Image:
'''simple docstring'''
SCREAMING_SNAKE_CASE = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE = crop_pct if crop_pct is not None else self.crop_pct
SCREAMING_SNAKE_CASE = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE = size if size is not None else self.size
SCREAMING_SNAKE_CASE = get_size_dict(_UpperCamelCase , default_to_square=_UpperCamelCase )
SCREAMING_SNAKE_CASE = make_list_of_images(_UpperCamelCase )
if not valid_images(_UpperCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
raise ValueError("crop_pct must be specified if size < 384." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE = [to_numpy_array(_UpperCamelCase ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE = [self.resize(image=_UpperCamelCase , size=_UpperCamelCase , crop_pct=_UpperCamelCase , resample=_UpperCamelCase ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE = [self.rescale(image=_UpperCamelCase , scale=_UpperCamelCase ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE = [self.normalize(image=_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase ) for image in images]
SCREAMING_SNAKE_CASE = [to_channel_dimension_format(_UpperCamelCase , _UpperCamelCase ) for image in images]
SCREAMING_SNAKE_CASE = {"pixel_values": images}
return BatchFeature(data=_UpperCamelCase , tensor_type=_UpperCamelCase )
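# Hedged sketch of the crop_pct arithmetic in `resize` above: for an eval size
# below 384, the shortest edge is first resized to int(shortest_edge / crop_pct)
# and then center-cropped back, e.g. 224 -> resize to 256 -> crop to 224.
def _demo_crop_pct(shortest_edge: int = 224, crop_pct: float = 224 / 256):
    resize_shortest_edge = int(shortest_edge / crop_pct)  # 256 for the defaults
    return resize_shortest_edge, shortest_edge  # (resize target, final crop size)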
| 206
| 0
|
from math import pi, sqrt
def gamma(num: float) -> float:
    """Gamma function for positive integers and half-integers, via recursion."""
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)


def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        print(f"gamma({num}) = {gamma(num)}")
        print("\nEnter 0 to exit...")
| 20
|
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
    def setUp(self) -> None:
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"
def A__ ( self: Tuple ,**lowerCamelCase_: List[str] ) -> List[Any]:
return AutoTokenizer.from_pretrained(self.checkpoint ,**lowerCamelCase_ )
def A__ ( self: str ) -> Union[str, Any]:
shutil.rmtree(self.tmpdirname )
def A__ ( self: List[Any] ) -> int:
UpperCAmelCase_ : int = self.get_tokenizer()
UpperCAmelCase_ : Tuple = BarkProcessor(tokenizer=lowerCamelCase_ )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase_ : Optional[int] = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab() )
@slow
def A__ ( self: List[Any] ) -> Optional[int]:
UpperCAmelCase_ : List[Any] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,)
processor.save_pretrained(
self.tmpdirname ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,speaker_embeddings_directory=self.speaker_embeddings_directory ,)
UpperCAmelCase_ : Optional[Any] = self.get_tokenizer(bos_token="""(BOS)""" ,eos_token="""(EOS)""" )
UpperCAmelCase_ : List[Any] = BarkProcessor.from_pretrained(
self.tmpdirname ,self.speaker_embeddings_dict_path ,bos_token="""(BOS)""" ,eos_token="""(EOS)""" ,)
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
def A__ ( self: List[str] ) -> Optional[Any]:
UpperCAmelCase_ : Any = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,)
UpperCAmelCase_ : Optional[int] = 35
UpperCAmelCase_ : Optional[int] = 2
UpperCAmelCase_ : Dict = 8
UpperCAmelCase_ : Optional[int] = {
"""semantic_prompt""": np.ones(lowerCamelCase_ ),
"""coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
"""fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
UpperCAmelCase_ : str = processor(text=self.input_string ,voice_preset=lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() ,processed_voice_preset.get(lowerCamelCase_ ,np.array([] ) ).tolist() )
# test loading voice preset from npz file
UpperCAmelCase_ : List[Any] = os.path.join(self.tmpdirname ,"""file.npz""" )
np.savez(lowerCamelCase_ ,**lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = processor(text=self.input_string ,voice_preset=lowerCamelCase_ )
UpperCAmelCase_ : int = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() ,processed_voice_preset.get(lowerCamelCase_ ,np.array([] ) ).tolist() )
# test loading voice preset from the hub
UpperCAmelCase_ : Union[str, Any] = processor(text=self.input_string ,voice_preset=self.voice_preset )
def A__ ( self: Dict ) -> Tuple:
UpperCAmelCase_ : Any = self.get_tokenizer()
UpperCAmelCase_ : Dict = BarkProcessor(tokenizer=lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = processor(text=self.input_string )
UpperCAmelCase_ : str = tokenizer(
self.input_string ,padding="""max_length""" ,max_length=256 ,add_special_tokens=lowerCamelCase_ ,return_attention_mask=lowerCamelCase_ ,return_token_type_ids=lowerCamelCase_ ,)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key].squeeze().tolist() )
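# Hedged usage sketch mirroring the tests above (checkpoint and voice preset
# names taken from setUp; weights are fetched from the Hub on first use).
def _demo_bark_processor() -> None:
    processor = BarkProcessor.from_pretrained("ylacombe/bark-small")
    inputs = processor(text="This is a test string", voice_preset="en_speaker_1")
    print(sorted(inputs.keys()))  # includes "input_ids" and "history_prompt"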
| 345
| 0
|
'''simple docstring'''
import enum
import os
from hashlib import sha256
from typing import Optional
from .. import config
from .logging import get_logger
logger = get_logger(__name__)


class VerificationMode(enum.Enum):
    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    """Exceptions during checksum verification of downloaded files."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """A file was downloaded that was not expected."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some files were supposed to be downloaded but were not."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """The downloaded file checksum doesn't match the expected checksum."""
def verify_checksums(expected_checksums, recorded_checksums, verification_name=None):
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)
class SplitsVerificationException(Exception):
    """Exceptions during split verification."""


class UnexpectedSplits(SplitsVerificationException):
    """A split was recorded that was not expected."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some expected splits are missing from the recorded splits."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """The split sizes don't match the expected split sizes."""
def verify_splits(expected_splits, recorded_splits):
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")


def get_size_checksum_dict(path, record_checksum: bool = True) -> dict:
    """Compute the file size and, optionally, the sha256 checksum of a file."""
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}


def is_small_dataset(dataset_size):
    """Check whether `dataset_size` fits under `config.IN_MEMORY_MAX_SIZE`."""
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
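# Sanity-check sketch for get_size_checksum_dict on a throwaway file; the
# expected digest is the well-known sha256 of b"hello".
def _demo_size_checksum() -> None:
    import tempfile

    with tempfile.NamedTemporaryFile(delete=False) as f:
        f.write(b"hello")
    info = get_size_checksum_dict(f.name, record_checksum=True)
    assert info["num_bytes"] == 5
    assert info["checksum"] == "2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824"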
| 367
|
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class A ( __snake_case ):
__magic_name__ = (UniPCMultistepScheduler,)
__magic_name__ = (('''num_inference_steps''', 25),)
def __lowerCAmelCase ( self , **SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
A : str = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
'''solver_type''': '''bh2''',
}
config.update(**SCREAMING_SNAKE_CASE )
return config
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=0 , **SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
A : List[Any] = dict(self.forward_default_kwargs )
A : Union[str, Any] = kwargs.pop('''num_inference_steps''' , SCREAMING_SNAKE_CASE )
A : Optional[Any] = self.dummy_sample
A : int = 0.1 * sample
A : Union[str, Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
A : Optional[Any] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE )
A : Optional[int] = scheduler_class(**SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
A : List[Any] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(SCREAMING_SNAKE_CASE )
A : List[Any] = scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE )
new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
A : Dict = dummy_past_residuals[: new_scheduler.config.solver_order]
A, A : Tuple = sample, sample
for t in range(SCREAMING_SNAKE_CASE , time_step + scheduler.config.solver_order + 1 ):
A : Any = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
A : Optional[Any] = new_scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=0 , **SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
A : Optional[Any] = dict(self.forward_default_kwargs )
A : Tuple = kwargs.pop('''num_inference_steps''' , SCREAMING_SNAKE_CASE )
A : List[Any] = self.dummy_sample
A : int = 0.1 * sample
A : Optional[Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
A : Optional[int] = self.get_scheduler_config()
A : Any = scheduler_class(**SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
# copy over dummy past residuals (must be after setting timesteps)
A : int = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(SCREAMING_SNAKE_CASE )
A : int = scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
# copy over dummy past residual (must be after setting timesteps)
A : Optional[Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
A : List[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
A : List[Any] = new_scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
if scheduler is None:
A : Dict = self.scheduler_classes[0]
A : Union[str, Any] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE )
A : List[Any] = scheduler_class(**SCREAMING_SNAKE_CASE )
A : Tuple = self.scheduler_classes[0]
A : Union[str, Any] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE )
A : List[str] = scheduler_class(**SCREAMING_SNAKE_CASE )
A : int = 10
A : Tuple = self.dummy_model()
A : Any = self.dummy_sample_deter
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
for i, t in enumerate(scheduler.timesteps ):
A : int = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
A : Optional[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample
return sample
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
A : Tuple = dict(self.forward_default_kwargs )
A : List[Any] = kwargs.pop('''num_inference_steps''' , SCREAMING_SNAKE_CASE )
for scheduler_class in self.scheduler_classes:
A : Dict = self.get_scheduler_config()
A : Dict = scheduler_class(**SCREAMING_SNAKE_CASE )
A : Optional[Any] = self.dummy_sample
A : Optional[int] = 0.1 * sample
if num_inference_steps is not None and hasattr(SCREAMING_SNAKE_CASE , '''set_timesteps''' ):
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
elif num_inference_steps is not None and not hasattr(SCREAMING_SNAKE_CASE , '''set_timesteps''' ):
A : Tuple = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
A : Dict = [residual + 0.2, residual + 0.15, residual + 0.10]
A : List[str] = dummy_past_residuals[: scheduler.config.solver_order]
A : List[Any] = scheduler.timesteps[5]
A : Dict = scheduler.timesteps[6]
A : List[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
A : List[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
A : Union[str, Any] = UniPCMultistepScheduler(**self.get_scheduler_config() )
A : List[Any] = self.full_loop(scheduler=SCREAMING_SNAKE_CASE )
A : List[str] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.2_464 ) < 1e-3
A : Dict = DPMSolverSinglestepScheduler.from_config(scheduler.config )
A : Optional[int] = DEISMultistepScheduler.from_config(scheduler.config )
A : List[Any] = DPMSolverMultistepScheduler.from_config(scheduler.config )
A : List[Any] = UniPCMultistepScheduler.from_config(scheduler.config )
A : Optional[Any] = self.full_loop(scheduler=SCREAMING_SNAKE_CASE )
A : Optional[Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.2_464 ) < 1e-3
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["bh1", "bh2"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            solver_order=order,
                            solver_type=solver_type,
                        )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=SCREAMING_SNAKE_CASE , solver_type=SCREAMING_SNAKE_CASE , prediction_type=SCREAMING_SNAKE_CASE , )
A : Dict = self.full_loop(
solver_order=SCREAMING_SNAKE_CASE , solver_type=SCREAMING_SNAKE_CASE , prediction_type=SCREAMING_SNAKE_CASE , )
assert not torch.isnan(SCREAMING_SNAKE_CASE ).any(), "Samples have nan numbers"
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=SCREAMING_SNAKE_CASE , time_step=0 )
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
A : int = self.full_loop()
A : int = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.2_464 ) < 1e-3
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
A : List[Any] = self.full_loop(prediction_type='''v_prediction''' )
A : Any = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.1_014 ) < 1e-3
    def test_fp16_support( self ) -> Union[str, Any]:
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True , dynamic_thresholding_ratio=0 )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        assert sample.dtype == torch.float16
    def test_unique_timesteps( self , **SCREAMING_SNAKE_CASE ) -> str:
        """simple docstring"""
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**SCREAMING_SNAKE_CASE )
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(scheduler.config.num_train_timesteps )
            assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
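    # A minimal sketch of the scheduler hot-swapping exercised in test_switch above
    # (the class names are real diffusers schedulers; the wiring is illustrative):
    # scheduler = UniPCMultistepScheduler(num_train_timesteps=1000)
    # scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
    # `from_config` rebuilds any multistep scheduler from another's config, which is
    # exactly what lets the test chain four scheduler types over one config.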
| 311
| 0
|
"""simple docstring"""
def _snake_case ( discount_rate : float , cash_flows : list[float] ):
    if discount_rate < 0:
        raise ValueError("""Discount rate cannot be negative""" )
    if not cash_flows:
        raise ValueError("""Cash flows list cannot be empty""" )
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows ) )
    return round(present_value , ndigits=2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
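    # A quick sketch of the helper above on illustrative numbers (not from the
    # original module): a 1000 outlay followed by two 600 inflows at a 10% rate.
    print(_snake_case(0.10, [-1000.0, 600.0, 600.0]))  # -> 41.32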
| 109
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
A: Optional[Any] = {
"configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
"tokenization_tapas": ["TapasTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    A["modeling_tapas"] = [
"TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TapasForMaskedLM",
"TapasForQuestionAnswering",
"TapasForSequenceClassification",
"TapasModel",
"TapasPreTrainedModel",
"load_tf_weights_in_tapas",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    A["modeling_tf_tapas"] = [
"TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFTapasForMaskedLM",
"TFTapasForQuestionAnswering",
"TFTapasForSequenceClassification",
"TFTapasModel",
"TFTapasPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], A, module_spec=__spec__)
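# With the lazy structure above in place, importing the package stays cheap: the
# submodules listed in `A` are only materialised by _LazyModule on first attribute
# access (a description of the standard transformers lazy-import behaviour that
# this file mirrors, not new functionality).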
| 109
| 1
|
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
    def __init__( self ,parent ,batch_size=1_3 ,image_size=3_0 ,patch_size=2 ,num_channels=3 ,is_training=True ,use_labels=True ,hidden_size=3_2 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=3_7 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,type_sequence_label_size=1_0 ,initializer_range=0.02 ,num_labels=3 ,mask_ratio=0.6 ,scope=None ,):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
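        # Worked example with the defaults above: num_patches = (30 // 2) ** 2 = 225,
        # so the expected (unmasked) sequence length is
        # ceil((1 - 0.6) * (225 + 1)) = ceil(90.4) = 91.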
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] ,self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        # `is_decoder=False` is an assumption here; the obfuscated source only shows `is_decoder=A__`.
        return ViTMAEConfig(
            image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=False ,initializer_range=self.initializer_range ,mask_ratio=self.mask_ratio ,)
    def create_and_check_model( self ,config ,pixel_values ,labels):
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_pretraining( self ,config ,pixel_values ,labels):
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels))
        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels))
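        # Shape check in numbers, using this tester's defaults: num_patches = 225;
        # the RGB branch expects 2 ** 2 * 3 = 12 channels per patch, the greyscale
        # branch 2 ** 2 = 4, so logits are (batch_size, 225, 12) and (batch_size, 225, 4).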
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class A_ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ):
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self ,config_class=ViTMAEConfig ,has_text_modality=False ,hidden_size=3_7)
    def test_config( self ):
        self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMAE does not use inputs_embeds')
def lowerCAmelCase ( self : Optional[Any]):
pass
    def test_model_common_attributes( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings() ,(nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x ,nn.Linear))
    def test_forward_signature( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] ,expected_arg_names)
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_pretraining( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
    def check_pt_tf_models( self ,tf_model ,pt_model ,pt_inputs_dict):
        # make masks reproducible
        np.random.seed(2)
        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)
        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict['noise'] = pt_noise
        super().check_pt_tf_models(tf_model ,pt_model ,pt_inputs_dict)
    def test_save_load( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict ,model_class))
            out_a = outputs[0].cpu().numpy()
            out_a[np.isnan(out_a)] = 0
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict ,model_class))
                # Make sure we don't have nans
                out_b = after_outputs[0].cpu().numpy()
                out_b[np.isnan(out_b)] = 0
                max_diff = np.amax(np.abs(out_b - out_a))
                self.assertLessEqual(max_diff ,1E-5)
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.')
def lowerCAmelCase ( self : Any):
pass
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.')
def lowerCAmelCase ( self : List[str]):
pass
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.')
def lowerCAmelCase ( self : List[Any]):
pass
@unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load')
def lowerCAmelCase ( self : Dict):
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def lowerCAmelCase ( self : Optional[int]):
pass
@slow
    def test_model_from_pretrained( self ):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img( ) -> Any:
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class A_ ( unittest.TestCase ):
@cached_property
    def default_image_processor( self ):
        return ViTImageProcessor.from_pretrained('facebook/vit-mae-base') if is_vision_available() else None
@slow
    def test_inference_for_pretraining( self ):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)
        model = ViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base').to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image ,return_tensors='pt').to(torch_device)
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs ,noise=torch.from_numpy(noise).to(device=torch_device))
        # verify the logits
        expected_shape = torch.Size((1, 1_9_6, 7_6_8))
        self.assertEqual(outputs.logits.shape ,expected_shape)
        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]])
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] ,expected_slice.to(torch_device) ,atol=1E-4))
| 359
|
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
a =3
def primitive_root( lowerCamelCase__ ) -> int:
print('Generating primitive root of p' )
while True:
        g = random.randrange(3 , lowerCamelCase__ )
        if pow(g , 2 , lowerCamelCase__ ) == 1:
            continue
        if pow(g , lowerCamelCase__ , lowerCamelCase__ ) == 1:
            continue
return g
def generate_key( lowerCamelCase__ ) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
print('Generating prime p...' )
    p = rabin_miller.generate_large_prime(lowerCamelCase__ )  # select large prime number.
    e_a = primitive_root(p )  # one primitive root on modulo p.
    d = random.randrange(3 , p )  # private_key -> have to be greater than 2 for safety.
    e_b = cryptomath.find_mod_inverse(pow(e_a , d , p ) , p )
    public_key = (lowerCamelCase__, e_a, e_b, p)
    private_key = (lowerCamelCase__, d)
return public_key, private_key
def make_key_files( name , key_size ) -> None:
if os.path.exists(F"{name}_pubkey.txt" ) or os.path.exists(F"{name}_privkey.txt" ):
print('\nWARNING:' )
print(
F"\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"
'Use a different name or delete these files and re-run this program.' )
sys.exit()
    public_key , private_key = generate_key(key_size )
print(F"\nWriting public key to file {name}_pubkey.txt..." )
with open(F"{name}_pubkey.txt" , 'w' ) as fo:
fo.write(F"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}" )
print(F"Writing private key to file {name}_privkey.txt..." )
with open(F"{name}_privkey.txt" , 'w' ) as fo:
fo.write(F"{private_key[0]},{private_key[1]}" )
def main() -> None:
print('Making key files...' )
make_key_files('elgamal' , 2_0_4_8 )
print('Key files generation successful' )
if __name__ == "__main__":
main()
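# For reference (a description of the writes above, not new behaviour): the
# public key file stores "key_size,e_1,e_2,p" and the private key file stores
# "key_size,d", matching the two f-string writes in make_key_files.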
| 113
| 0
|
import os
def a_ ( ) -> Tuple:
"""simple docstring"""
    with open(os.path.dirname(__file__ ) + '/grid.txt' ) as f:
        l = []  # noqa: E741
        for _ in range(20 ):
            l.append([int(x ) for x in f.readline().split()] )
    maximum = 0
    # right
    for i in range(20 ):
        for j in range(17 ):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp
    # down
    for i in range(17 ):
        for j in range(20 ):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp
    # diagonal 1
    for i in range(17 ):
        for j in range(17 ):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp
    # diagonal 2
    for i in range(17 ):
        for j in range(3 , 20 ):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
if __name__ == "__main__":
print(solution())
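# Note on the scan bounds above: ranges stop at 17 (= 20 - 3) so each 4-element
# window stays inside the 20x20 grid, and the anti-diagonal pass starts j at 3 so
# j - 3 never goes negative; the four passes cover rows, columns, and both diagonals.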
| 307
|
from typing import TYPE_CHECKING
from ...utils import _LazyModule
__UpperCamelCase : Any = {"""tokenization_byt5""": ["""ByT5Tokenizer"""]}
if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], __UpperCamelCase, module_spec=__spec__)
| 307
| 1
|
'''simple docstring'''
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module( lowerCAmelCase_ ):
    if is_torch_version("""<""" , """2.0.0""" ) or not hasattr(torch , """_dynamo""" ):
        return False
    return isinstance(lowerCAmelCase_ , torch._dynamo.eval_frame.OptimizedModule )
def extract_model_from_parallel( model , keep_fpaa_wrapper = True ):
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
    is_compiled = is_compiled_module(model )
    if is_compiled:
        compiled_model = model
        model = model._orig_mod
    if is_deepspeed_available():
        options += (DeepSpeedEngine,)
    while isinstance(model , options ):
        model = model.module
    if not keep_fpaa_wrapper:
        forward = getattr(model , """forward""" )
        original_forward = model.__dict__.pop("""_original_forward""" , None )
        if original_forward is not None:
            while hasattr(forward , """__wrapped__""" ):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model , """_converted_to_transformer_engine""" , False ):
            convert_model(model , to_transformer_engine=False )
    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model
    return model
def wait_for_everyone( ):
    PartialState().wait_for_everyone()
def save( obj , f ):
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj , f )
    elif PartialState().local_process_index == 0:
        torch.save(obj , f )
@contextmanager
def patch_environment( **kwargs ):
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value )
    yield
    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]
def get_pretty_name( obj ):
    if not hasattr(obj , """__qualname__""" ) and not hasattr(obj , """__name__""" ):
        obj = getattr(obj , """__class__""" , obj )
    if hasattr(obj , """__qualname__""" ):
        return obj.__qualname__
    if hasattr(obj , """__name__""" ):
        return obj.__name__
    return str(obj )
def merge_dicts( source , destination ):
    for key, value in source.items():
        if isinstance(value , dict ):
            node = destination.setdefault(key , {} )
            merge_dicts(value , node )
        else:
            destination[key] = value
    return destination
def is_port_in_use( port = None ):
    if port is None:
        port = 2_9500
    with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
        return s.connect_ex(("""localhost""", port) ) == 0
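# A minimal sketch of the context manager above (illustrative values):
# with patch_environment(master_port="29501"):
#     assert os.environ["MASTER_PORT"] == "29501"
# the variable is removed again once the block exits.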
| 170
|
'''simple docstring'''
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
lowerCAmelCase_ : str = logging.get_logger(__name__)
lowerCAmelCase_ : Union[str, Any] = TypeVar('''DatasetType''', Dataset, IterableDataset)
def interleave_datasets( datasets , probabilities = None , seed = None , info = None , split = None , stopping_strategy = "first_exhausted" , ):
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset
    if not datasets:
        raise ValueError("""Unable to interleave an empty list of datasets.""" )
    for i, dataset in enumerate(datasets ):
        if not isinstance(dataset , (Dataset, IterableDataset) ):
            if isinstance(dataset , (DatasetDict, IterableDatasetDict) ):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        """is an empty dataset dictionary.""" )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset )}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset ) )}']" )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset ).__name__}." )
        if i == 0:
            dataset_type , other_type = (
                (Dataset, IterableDataset) if isinstance(dataset , Dataset ) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset , dataset_type ):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy." )
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets , probabilities , seed , info=info , split=split , stopping_strategy=stopping_strategy )
    else:
        return _interleave_iterable_datasets(
            datasets , probabilities , seed , info=info , split=split , stopping_strategy=stopping_strategy )
def concatenate_datasets( dsets , info = None , split = None , axis = 0 , ):
    if not dsets:
        raise ValueError("""Unable to concatenate an empty list of datasets.""" )
    for i, dataset in enumerate(dsets ):
        if not isinstance(dataset , (Dataset, IterableDataset) ):
            if isinstance(dataset , (DatasetDict, IterableDatasetDict) ):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        """is an empty dataset dictionary.""" )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset )}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset ) )}']" )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset ).__name__}." )
        if i == 0:
            dataset_type , other_type = (
                (Dataset, IterableDataset) if isinstance(dataset , Dataset ) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset , dataset_type ):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets , info=info , split=split , axis=axis )
    else:
        return _concatenate_iterable_datasets(dsets , info=info , split=split , axis=axis )
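# A minimal usage sketch of the two helpers above (dataset objects illustrative):
# mixed = interleave_datasets([ds_a, ds_b], probabilities=[0.7, 0.3], seed=42,
#                             stopping_strategy="all_exhausted")
# combined = concatenate_datasets([ds_a, ds_b], axis=0)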
| 170
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : List[Any] = logging.get_logger(__name__)
__lowerCamelCase : List[str] = {
'''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
'''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
'''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
'''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
'''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class a__ ( PretrainedConfig ):
    model_type = 'rwkv'
    attribute_map = {'max_position_embeddings': 'context_length'}
    def __init__( self,vocab_size=5_0277,context_length=1024,hidden_size=4096,num_hidden_layers=32,attention_hidden_size=None,intermediate_size=None,layer_norm_epsilon=1E-5,bos_token_id=0,eos_token_id=0,rescale_every=6,tie_word_embeddings=False,use_cache=True,**kwargs,):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            tie_word_embeddings=tie_word_embeddings,bos_token_id=bos_token_id,eos_token_id=eos_token_id,**kwargs )
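# A quick sketch of the fallback logic above: with the defaults, a__() yields
# attention_hidden_size == hidden_size == 4096 and intermediate_size == 4 * 4096
# == 16384, since both fall back when left as None.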
| 18
|
'''simple docstring'''
from collections import deque
from math import floor
from random import random
from time import time
class _lowerCAmelCase :
def __init__(self ):
A_ : int = {}
    def add_pair(self , u , v , w=1 ):
        if self.graph.get(u ):
            if self.graph[u].count([w, v] ) == 0:
                self.graph[u].append([w, v] )
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v ):
            self.graph[v] = []
def _a (self ):
return list(self.graph )
    def remove_pair(self , u , v ):
        if self.graph.get(u ):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_ )
def _a (self , lowercase=-2 , lowercase=-1 ):
if s == d:
return []
A_ : Union[str, Any] = []
A_ : Dict = []
if s == -2:
A_ : List[Any] = list(self.graph )[0]
stack.append(lowercase )
visited.append(lowercase )
A_ : Any = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
A_ : Tuple = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(lowercase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
A_ : List[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowercase ) != 0:
A_ : List[str] = stack[len(lowercase ) - 1]
else:
A_ : str = ss
# check if se have reached the starting point
if len(lowercase ) == 0:
return visited
def _a (self , lowercase=-1 ):
if c == -1:
A_ : Tuple = floor(random() * 10000 ) + 10
for i in range(lowercase ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
A_ : List[str] = floor(random() * c ) + 1
if n != i:
self.add_pair(lowercase , lowercase , 1 )
def _a (self , lowercase=-2 ):
A_ : Union[str, Any] = deque()
A_ : Tuple = []
if s == -2:
A_ : int = list(self.graph )[0]
d.append(lowercase )
visited.append(lowercase )
while d:
A_ : Any = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _a (self , lowercase ):
A_ : Dict = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def _a (self , lowercase ):
return len(self.graph[u] )
def _a (self , lowercase=-2 ):
A_ : Dict = []
A_ : Optional[Any] = []
if s == -2:
A_ : int = list(self.graph )[0]
stack.append(lowercase )
visited.append(lowercase )
A_ : Optional[Any] = s
A_ : int = []
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
A_ : Optional[int] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A_ : Optional[Any] = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(lowercase ) != 0:
A_ : int = stack[len(lowercase ) - 1]
else:
A_ : Optional[Any] = ss
# check if se have reached the starting point
if len(lowercase ) == 0:
return sorted_nodes
def _a (self ):
A_ : Dict = []
A_ : Tuple = []
A_ : Any = list(self.graph )[0]
stack.append(lowercase )
visited.append(lowercase )
A_ : Optional[int] = -2
A_ : List[Any] = []
A_ : List[str] = s
A_ : Optional[int] = False
A_ : Union[str, Any] = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
A_ : List[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A_ : Dict = len(lowercase ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A_ : List[str] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A_ : str = True
if len(lowercase ) != 0:
A_ : Union[str, Any] = stack[len(lowercase ) - 1]
else:
A_ : Tuple = False
indirect_parents.append(lowercase )
A_ : Tuple = s
A_ : Tuple = ss
# check if se have reached the starting point
if len(lowercase ) == 0:
return list(lowercase )
def _a (self ):
A_ : Union[str, Any] = []
A_ : str = []
A_ : List[Any] = list(self.graph )[0]
stack.append(lowercase )
visited.append(lowercase )
A_ : List[Any] = -2
A_ : Tuple = []
A_ : Optional[Any] = s
A_ : Union[str, Any] = False
A_ : Optional[Any] = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
A_ : int = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A_ : Dict = len(lowercase ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A_ : Optional[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A_ : List[str] = True
if len(lowercase ) != 0:
A_ : Dict = stack[len(lowercase ) - 1]
else:
A_ : int = False
indirect_parents.append(lowercase )
A_ : List[Any] = s
A_ : int = ss
# check if se have reached the starting point
if len(lowercase ) == 0:
return False
def _a (self , lowercase=-2 , lowercase=-1 ):
A_ : str = time()
self.dfs(lowercase , lowercase )
A_ : Any = time()
return end - begin
def _a (self , lowercase=-2 ):
A_ : Union[str, Any] = time()
self.bfs(lowercase )
A_ : Optional[Any] = time()
return end - begin
class _lowerCAmelCase :
def __init__(self ):
A_ : List[str] = {}
def _a (self , lowercase , lowercase , lowercase=1 ):
# check if the u exists
if self.graph.get(lowercase ):
# if there already is a edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
A_ : int = [[w, v]]
# add the other way
if self.graph.get(lowercase ):
# if there already is a edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
# if u does not exist
A_ : Any = [[w, u]]
def _a (self , lowercase , lowercase ):
if self.graph.get(lowercase ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowercase )
# the other way round
if self.graph.get(lowercase ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(lowercase )
def _a (self , lowercase=-2 , lowercase=-1 ):
if s == d:
return []
A_ : Dict = []
A_ : List[Any] = []
if s == -2:
A_ : str = list(self.graph )[0]
stack.append(lowercase )
visited.append(lowercase )
A_ : List[str] = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
A_ : Union[str, Any] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(lowercase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
A_ : Optional[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowercase ) != 0:
A_ : Dict = stack[len(lowercase ) - 1]
else:
A_ : List[str] = ss
# check if se have reached the starting point
if len(lowercase ) == 0:
return visited
def _a (self , lowercase=-1 ):
if c == -1:
A_ : Union[str, Any] = floor(random() * 10000 ) + 10
for i in range(lowercase ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
A_ : Union[str, Any] = floor(random() * c ) + 1
if n != i:
self.add_pair(lowercase , lowercase , 1 )
def _a (self , lowercase=-2 ):
A_ : int = deque()
A_ : Optional[Any] = []
if s == -2:
A_ : Optional[int] = list(self.graph )[0]
d.append(lowercase )
visited.append(lowercase )
while d:
A_ : Union[str, Any] = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _a (self , lowercase ):
return len(self.graph[u] )
def _a (self ):
A_ : Optional[int] = []
A_ : Dict = []
A_ : Union[str, Any] = list(self.graph )[0]
stack.append(lowercase )
visited.append(lowercase )
A_ : List[Any] = -2
A_ : List[str] = []
A_ : int = s
A_ : Optional[int] = False
A_ : Any = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
A_ : Tuple = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A_ : int = len(lowercase ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A_ : Tuple = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A_ : str = True
if len(lowercase ) != 0:
A_ : Any = stack[len(lowercase ) - 1]
else:
A_ : str = False
indirect_parents.append(lowercase )
A_ : Dict = s
A_ : List[Any] = ss
# check if se have reached the starting point
if len(lowercase ) == 0:
return list(lowercase )
def _a (self ):
A_ : Optional[int] = []
A_ : Optional[int] = []
A_ : List[str] = list(self.graph )[0]
stack.append(lowercase )
visited.append(lowercase )
A_ : Any = -2
A_ : Any = []
A_ : Tuple = s
A_ : str = False
A_ : str = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
A_ : Dict = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A_ : Any = len(lowercase ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A_ : Optional[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A_ : Union[str, Any] = True
if len(lowercase ) != 0:
A_ : List[Any] = stack[len(lowercase ) - 1]
else:
A_ : int = False
indirect_parents.append(lowercase )
A_ : int = s
A_ : int = ss
# check if se have reached the starting point
if len(lowercase ) == 0:
return False
def _a (self ):
return list(self.graph )
def _a (self , lowercase=-2 , lowercase=-1 ):
A_ : Any = time()
self.dfs(lowercase , lowercase )
A_ : Optional[Any] = time()
return end - begin
def _a (self , lowercase=-2 ):
A_ : List[Any] = time()
self.bfs(lowercase )
A_ : List[Any] = time()
return end - begin
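# Usage sketch (illustrative): note both classes above share the obfuscated name
# `_lowerCAmelCase`, so at module level the second, undirected definition shadows
# the directed one. Assuming the remaining `_a` methods are restored to their real
# names (dfs, bfs, ...) as done for add_pair/remove_pair in the directed class:
# g = _lowerCAmelCase()
# g.add_pair(1, 2); g.add_pair(2, 3)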
| 206
| 0
|
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
lowerCamelCase__ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class A__ ( Pipeline):
    def __init__( self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
requires_backends(self , 'vision' )
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )
    def _sanitize_parameters( self , top_k=None ):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params['top_k'] = top_k
        return {}, {}, postprocess_params
    def __call__( self , images , **kwargs ):
        return super().__call__(images , **kwargs )
    def preprocess( self , image ):
        image = load_image(image )
        model_inputs = self.image_processor(images=image , return_tensors=self.framework )
        return model_inputs
    def _forward( self , model_inputs ):
        model_outputs = self.model(**model_inputs )
        return model_outputs
    def postprocess( self , model_outputs , top_k=5 ):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1 )[0]
            scores , ids = probs.topk(top_k )
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits , axis=-1 )[0]
            topk = tf.math.top_k(probs , k=top_k )
            scores , ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}" )
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores , ids )]
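# A minimal usage sketch of the pipeline class above, via the standard
# transformers factory (checkpoint and image path illustrative):
# from transformers import pipeline
# classifier = pipeline("image-classification", model="google/vit-base-patch16-224")
# classifier("cat.png", top_k=3)  # -> [{"score": ..., "label": ...}, ...]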
| 350
|
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class A__ ( unittest.TestCase):
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ):
__lowerCAmelCase : Optional[Any] = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = 'sshleifer/tiny-gpt2'
__lowerCAmelCase : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=_SCREAMING_SNAKE_CASE , multi_process=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Tuple = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCamelCase ( self ):
__lowerCAmelCase : int = 'sgugger/tiny-distilbert-classification'
__lowerCAmelCase : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , only_pretrain_model=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Optional[int] = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[int] = 'sshleifer/tiny-gpt2'
__lowerCAmelCase : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : List[str] = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Union[str, Any] = 'sshleifer/tiny-gpt2'
__lowerCAmelCase : int = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=_SCREAMING_SNAKE_CASE , multi_process=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Dict = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE , [config] )
__lowerCAmelCase : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[Any] = 'sshleifer/tiny-gpt2'
__lowerCAmelCase : Tuple = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Any = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE , [config] )
__lowerCAmelCase : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = 'sshleifer/tiny-gpt2'
__lowerCAmelCase : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : str = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[str] = 'sshleifer/tiny-gpt2'
__lowerCAmelCase : Any = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : List[Any] = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE , [config] )
__lowerCAmelCase : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[int] = 'patrickvonplaten/t5-tiny-random'
__lowerCAmelCase : int = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Dict = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE , configs=[config] )
__lowerCAmelCase : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU' ) ) == 0 , 'Cannot do xla on CPU.' )
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[str] = 'sshleifer/tiny-gpt2'
__lowerCAmelCase : List[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , use_xla=_SCREAMING_SNAKE_CASE , multi_process=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Optional[int] = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[int] = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
__lowerCAmelCase : List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=_SCREAMING_SNAKE_CASE , save_to_csv=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_SCREAMING_SNAKE_CASE , 'inf_time.csv' ) , inference_memory_csv_file=os.path.join(_SCREAMING_SNAKE_CASE , 'inf_mem.csv' ) , env_info_csv_file=os.path.join(_SCREAMING_SNAKE_CASE , 'env.csv' ) , multi_process=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Tuple = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE )
benchmark.run()
self.assertTrue(Path(os.path.join(_SCREAMING_SNAKE_CASE , 'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(_SCREAMING_SNAKE_CASE , 'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(_SCREAMING_SNAKE_CASE , 'env.csv' ) ).exists() )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Tuple = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(_SCREAMING_SNAKE_CASE ):
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'sequential' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'cumulative' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'current' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
__lowerCAmelCase : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_SCREAMING_SNAKE_CASE , 'log.txt' ) , log_print=_SCREAMING_SNAKE_CASE , trace_memory_line_by_line=_SCREAMING_SNAKE_CASE , eager_mode=_SCREAMING_SNAKE_CASE , multi_process=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Any = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(_SCREAMING_SNAKE_CASE , 'log.txt' ) ).exists() )
| 182
| 0
|
'''simple docstring'''
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class a_ (unittest.TestCase ):
def __UpperCamelCase ( self ):
debug_launcher(test_script.main )
def __UpperCamelCase ( self ):
debug_launcher(test_ops.main )
| 309
|
'''simple docstring'''
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMSNModelTester:
    def __init__( self , parent , batch_size=1_3 , image_size=3_0 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=1_0 , initializer_range=0.02 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , pixel_values , labels ):
        model = ViTMSNModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        print(f"""Pixel and labels shape: {pixel_values.shape}, {labels.shape}""" )
        print(f"""Labels: {labels}""" )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class a_ (ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"""feature-extraction""": ViTMSNModel, """image-classification""": ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ):
        self.model_tester = ViTMSNModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ViTMSNConfig , has_text_modality=False , hidden_size=3_7 )
    def test_config( self ):
        self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMSN does not use inputs_embeds""" )
def __UpperCamelCase ( self ):
pass
    def test_model_common_attributes( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ):
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img( ) -> Tuple:
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class a_ (unittest.TestCase ):
@cached_property
    def default_image_processor( self ):
        return ViTImageProcessor.from_pretrained("""facebook/vit-msn-small""" ) if is_vision_available() else None
@slow
def __UpperCamelCase ( self ):
torch.manual_seed(2 )
        model = ViTMSNForImageClassification.from_pretrained("""facebook/vit-msn-small""" ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
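        # Note: the expected slice above is specific to the facebook/vit-msn-small
        # checkpoint; torch.manual_seed(2) pins any residual nondeterminism and the
        # atol=1E-4 tolerance absorbs small numerical differences across devices.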
| 309
| 1
|
'''simple docstring'''
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class A_ ( TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : str ) -> str:
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5
# Realm tok
        vocab_tokens = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'test',
'question',
'this',
'is',
'the',
'first',
'second',
'third',
'fourth',
'fifth',
'record',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
        realm_tokenizer_path = os.path.join(self.tmpdirname , 'realm_tokenizer' )
        os.makedirs(realm_tokenizer_path , exist_ok=True )
        self.vocab_file = os.path.join(realm_tokenizer_path , VOCAB_FILES_NAMES['vocab_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
        realm_block_records_path = os.path.join(self.tmpdirname , 'realm_block_records' )
        os.makedirs(realm_block_records_path , exist_ok=True )
def UpperCAmelCase_ ( self : Optional[Any] ) -> RealmTokenizer:
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'realm_tokenizer' ) )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Union[str, Any]:
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase_ ( self : List[str] ) -> Dict:
        config = RealmConfig(num_block_records=self.num_block_records )
        return config
def UpperCAmelCase_ ( self : Dict ) -> Optional[int]:
        dataset = Dataset.from_dict(
{
'id': ['0', '1'],
'question': ['foo', 'bar'],
'answers': [['Foo', 'Bar'], ['Bar']],
} )
return dataset
def UpperCAmelCase_ ( self : Dict ) -> Any:
        block_records = np.array(
[
B'This is the first record',
B'This is the second record',
B'This is the third record',
B'This is the fourth record',
B'This is the fifth record',
B'This is a longer longer longer record',
            ] , dtype=object , )
return block_records
def UpperCAmelCase_ ( self : Optional[int] ) -> List[str]:
        retriever = RealmRetriever(
            block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def UpperCAmelCase_ ( self : List[str] ) -> Dict:
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer
        retrieved_block_ids = np.array([0, 3] , dtype='long' )
        question_input_ids = tokenizer(['Test question'] ).input_ids
        answer_ids = tokenizer(
            ['the fourth'] , add_special_tokens=False , return_token_type_ids=False , return_attention_mask=False , ).input_ids
        max_length = config.reader_seq_len
        has_answers , start_pos , end_pos , concat_inputs = retriever(
            retrieved_block_ids , question_input_ids , answer_ids=answer_ids , max_length=max_length , return_tensors='np' )
        self.assertEqual(len(has_answers ) , 2 )
        self.assertEqual(len(start_pos ) , 2 )
        self.assertEqual(len(end_pos ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'first', 'record', '[SEP]'] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'fourth', 'record', '[SEP]'] , )
def UpperCAmelCase_ ( self : Dict ) -> List[str]:
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer
        retrieved_block_ids = np.array([0, 3, 5] , dtype='long' )
        question_input_ids = tokenizer(['Test question'] ).input_ids
        answer_ids = tokenizer(
            ['the fourth', 'longer longer'] , add_special_tokens=False , return_token_type_ids=False , return_attention_mask=False , ).input_ids
        max_length = config.reader_seq_len
        has_answers , start_pos , end_pos , concat_inputs = retriever(
            retrieved_block_ids , question_input_ids , answer_ids=answer_ids , max_length=max_length , return_tensors='np' )
        self.assertEqual([False, True, True] , has_answers )
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , start_pos )
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , end_pos )
def UpperCAmelCase_ ( self : List[str] ) -> Union[str, Any]:
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname , 'realm_block_records' ) )
        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname , 'realm_block_records' ) )
        self.assertEqual(retriever.block_records[0] , B'This is the first record' )
        # Test mocked remote path
        with patch('transformers.models.realm.retrieval_realm.hf_hub_download' ) as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname , 'realm_block_records' ) , _REALM_BLOCK_RECORDS_FILENAME )
            retriever = RealmRetriever.from_pretrained('google/realm-cc-news-pretrained-openqa' )
            self.assertEqual(retriever.block_records[0] , B'This is the first record' )
| 353
|
'''simple docstring'''
from __future__ import annotations
def find_max( nums , left , right ):
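    """
    Return the largest element of nums[left] ... nums[right] using divide and conquer.
    Small, verifiable examples for the doctest run at the bottom of this file:

    >>> find_max([1, 3, 2], 0, 2)
    3
    >>> find_max([5], -1, -1)
    5
    """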
    if len(nums ) == 0:
        raise ValueError('find_max() arg is an empty sequence' )
    if (
        left >= len(nums )
        or left < -len(nums )
        or right >= len(nums )
        or right < -len(nums )
    ):
        raise IndexError('list index out of range' )
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums , left , mid )  # find max in range[left, mid]
    right_max = find_max(nums , mid + 1 , right )  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 280
| 0
|
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
snake_case : Optional[int] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class _snake_case ( ChunkPipeline ):
def __init__( self , **_lowerCamelCase ):
super().__init__(**_lowerCamelCase )
if self.framework == "tf":
raise ValueError(F'''The {self.__class__} is only available in PyTorch.''' )
requires_backends(self , '''vision''' )
self.check_model_type(_lowerCamelCase )
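    # How this ChunkPipeline fits together: preprocess() yields one chunk per candidate
    # label, _forward() runs the model once per chunk, and postprocess() merges the
    # per-label detections, filters them by `threshold`, sorts them by score, and
    # optionally truncates the list to `top_k`.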
    def __call__( self , image , candidate_labels = None , **kwargs , ):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop('''text_queries''' )
        if isinstance(image , (str, Image.Image) ):
            inputs = {'''image''': image, '''candidate_labels''': candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs , **kwargs )
        return results
    def _sanitize_parameters( self , **kwargs ):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params['''threshold'''] = kwargs['''threshold''']
        if "top_k" in kwargs:
            postprocess_params['''top_k'''] = kwargs['''top_k''']
        return {}, {}, postprocess_params
    def preprocess( self , inputs ):
        image = load_image(inputs['''image'''] )
        candidate_labels = inputs['''candidate_labels''']
        if isinstance(candidate_labels , str ):
            candidate_labels = candidate_labels.split(''',''' )
        target_size = torch.tensor([[image.height, image.width]] , dtype=torch.int32 )
        for i, candidate_label in enumerate(candidate_labels ):
            text_inputs = self.tokenizer(candidate_label , return_tensors=self.framework )
            image_features = self.image_processor(image , return_tensors=self.framework )
            yield {
                "is_last": i == len(candidate_labels ) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }
    def _forward( self , model_inputs ):
        target_size = model_inputs.pop('''target_size''' )
        candidate_label = model_inputs.pop('''candidate_label''' )
        is_last = model_inputs.pop('''is_last''' )
        outputs = self.model(**model_inputs )
        model_outputs = {'''target_size''': target_size, '''candidate_label''': candidate_label, '''is_last''': is_last, **outputs}
        return model_outputs
    def postprocess( self , model_outputs , threshold=0.1 , top_k=None ):
        results = []
        for model_output in model_outputs:
            label = model_output['''candidate_label''']
            model_output = BaseModelOutput(model_output )
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output , threshold=threshold , target_sizes=model_output['''target_size'''] )[0]
            for index in outputs["scores"].nonzero():
                score = outputs['''scores'''][index].item()
                box = self._get_bounding_box(outputs['''boxes'''][index][0] )
                result = {'''score''': score, '''label''': label, '''box''': box}
                results.append(result )
        results = sorted(results , key=lambda x : x["score"] , reverse=True )
        if top_k:
            results = results[:top_k]
        return results
    def _get_bounding_box( self , box ):
        if self.framework != "pt":
            raise ValueError('''The ZeroShotObjectDetectionPipeline is only available in PyTorch.''' )
        xmin , ymin , xmax , ymax = box.int().tolist()
        bbox = {
            '''xmin''': xmin,
            '''ymin''': ymin,
            '''xmax''': xmax,
            '''ymax''': ymax,
        }
        return bbox
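# A minimal usage sketch, assuming the standard `pipeline` factory and a zero-shot
# detection checkpoint such as "google/owlvit-base-patch32" (the checkpoint name is
# an example, not taken from this file):
#
#   from transformers import pipeline
#   detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
#   detector(
#       "http://images.cocodataset.org/val2017/000000039769.jpg",
#       candidate_labels=["cat", "remote control"],
#   )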
| 94
|
"""simple docstring"""
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
api = HfApi()
results = {}
# fmt: off
__UpperCamelCase = torch.tensor([
-0.7_515, -1.6_883, 0.2_420, 0.0_300, 0.6_347, 1.3_433, -1.1_743, -3.7_467,
1.2_342, -2.2_485, 0.4_636, 0.8_076, -0.7_991, 0.3_969, 0.8_498, 0.9_189,
-1.8_887, -3.3_522, 0.7_639, 0.2_040, 0.6_271, -2.7_148, -1.6_316, 3.0_839,
0.3_186, 0.2_721, -0.9_759, -1.2_461, 2.6_257, 1.3_557
])
__UpperCamelCase = torch.tensor([
-2.3_639, -2.5_344, 0.0_054, -0.6_674, 1.5_990, 1.0_158, 0.3_124, -2.1_436,
1.8_795, -2.5_429, -0.1_566, -0.3_973, 1.2_490, 2.6_447, 1.2_283, -0.5_208,
-2.8_154, -3.5_119, 2.3_838, 1.2_033, 1.7_201, -2.1_256, -1.4_576, 2.7_948,
2.4_204, -0.9_752, -1.2_546, 0.8_027, 3.2_758, 3.1_365
])
__UpperCamelCase = torch.tensor([
-0.6_531, -0.6_891, -0.3_172, -0.5_375, -0.9_140, -0.5_367, -0.1_175, -0.7_869,
-0.3_808, -0.4_513, -0.2_098, -0.0_083, 0.3_183, 0.5_140, 0.2_247, -0.1_304,
-0.1_302, -0.2_802, -0.2_084, -0.2_025, -0.4_967, -0.4_873, -0.0_861, 0.6_925,
0.0_250, 0.1_290, -0.1_543, 0.6_316, 1.0_460, 1.4_943
])
__UpperCamelCase = torch.tensor([
0.0_911, 0.1_107, 0.0_182, 0.0_435, -0.0_805, -0.0_608, 0.0_381, 0.2_172,
-0.0_280, 0.1_327, -0.0_299, -0.0_255, -0.0_050, -0.1_170, -0.1_046, 0.0_309,
0.1_367, 0.1_728, -0.0_533, -0.0_748, -0.0_534, 0.1_624, 0.0_384, -0.1_805,
-0.0_707, 0.0_642, 0.0_220, -0.0_134, -0.1_333, -0.1_505
])
__UpperCamelCase = torch.tensor([
0.1_321, 0.1_337, 0.0_440, 0.0_622, -0.0_591, -0.0_370, 0.0_503, 0.2_133,
-0.0_177, 0.1_415, -0.0_116, -0.0_112, 0.0_044, -0.0_980, -0.0_789, 0.0_395,
0.1_502, 0.1_785, -0.0_488, -0.0_514, -0.0_404, 0.1_539, 0.0_454, -0.1_559,
-0.0_665, 0.0_659, 0.0_383, -0.0_005, -0.1_266, -0.1_386
])
__UpperCamelCase = torch.tensor([
0.1_154, 0.1_218, 0.0_307, 0.0_526, -0.0_711, -0.0_541, 0.0_366, 0.2_078,
-0.0_267, 0.1_317, -0.0_226, -0.0_193, -0.0_014, -0.1_055, -0.0_902, 0.0_330,
0.1_391, 0.1_709, -0.0_562, -0.0_693, -0.0_560, 0.1_482, 0.0_381, -0.1_683,
-0.0_681, 0.0_661, 0.0_331, -0.0_046, -0.1_268, -0.1_431
])
__UpperCamelCase = torch.tensor([
0.1_192, 0.1_240, 0.0_414, 0.0_606, -0.0_557, -0.0_412, 0.0_430, 0.2_042,
-0.0_200, 0.1_385, -0.0_115, -0.0_132, 0.0_017, -0.0_965, -0.0_802, 0.0_398,
0.1_433, 0.1_747, -0.0_458, -0.0_533, -0.0_407, 0.1_545, 0.0_419, -0.1_574,
-0.0_645, 0.0_626, 0.0_341, -0.0_010, -0.1_199, -0.1_390
])
__UpperCamelCase = torch.tensor([
0.1_075, 0.1_074, 0.0_205, 0.0_431, -0.0_774, -0.0_607, 0.0_298, 0.2_042,
-0.0_320, 0.1_267, -0.0_281, -0.0_250, -0.0_064, -0.1_091, -0.0_946, 0.0_290,
0.1_328, 0.1_650, -0.0_580, -0.0_738, -0.0_586, 0.1_440, 0.0_337, -0.1_746,
-0.0_712, 0.0_605, 0.0_250, -0.0_099, -0.1_316, -0.1_473
])
__UpperCamelCase = torch.tensor([
-1.4_572, -2.0_481, -0.0_414, -0.6_005, 1.4_136, 0.5_848, 0.4_028, -2.7_330,
1.2_212, -2.1_228, 0.2_155, 0.4_039, 0.7_662, 2.0_535, 0.7_477, -0.3_243,
-2.1_758, -2.7_648, 1.6_947, 0.7_026, 1.2_338, -1.6_078, -0.8_682, 2.2_810,
1.8_574, -0.5_718, -0.5_586, -0.0_186, 2.3_415, 2.1_251])
__UpperCamelCase = torch.tensor([
-1.3_690, -1.9_720, -0.4_090, -0.6_966, 1.4_660, 0.9_938, -0.1_385, -2.7_324,
0.7_736, -1.8_917, 0.2_923, 0.4_293, 0.1_693, 1.4_112, 1.1_887, -0.3_181,
-2.2_160, -2.6_381, 1.3_170, 0.8_163, 0.9_240, -1.6_544, -0.6_099, 2.5_259,
1.6_430, -0.9_090, -0.9_392, -0.0_126, 2.4_268, 2.3_266
])
__UpperCamelCase = torch.tensor([
-1.3_525, -1.9_628, -0.3_956, -0.6_860, 1.4_664, 1.0_014, -0.1_259, -2.7_212,
0.7_772, -1.8_811, 0.2_996, 0.4_388, 0.1_704, 1.4_029, 1.1_701, -0.3_027,
-2.2_053, -2.6_287, 1.3_350, 0.8_131, 0.9_274, -1.6_292, -0.6_098, 2.5_131,
1.6_505, -0.8_958, -0.9_298, -0.0_151, 2.4_257, 2.3_355
])
__UpperCamelCase = torch.tensor([
-2.0_585, -2.7_897, -0.2_850, -0.8_940, 1.9_052, 0.5_702, 0.6_345, -3.8_959,
1.5_932, -3.2_319, 0.1_974, 0.0_287, 1.7_566, 2.6_543, 0.8_387, -0.5_351,
-3.2_736, -4.3_375, 2.9_029, 1.6_390, 1.4_640, -2.1_701, -1.9_013, 2.9_341,
3.4_981, -0.6_255, -1.1_644, -0.1_591, 3.7_097, 3.2_066
])
__UpperCamelCase = torch.tensor([
-2.3_139, -2.5_594, -0.0_197, -0.6_785, 1.7_001, 1.1_606, 0.3_075, -2.1_740,
1.8_071, -2.5_630, -0.0_926, -0.3_811, 1.2_116, 2.6_246, 1.2_731, -0.5_398,
-2.8_153, -3.6_140, 2.3_893, 1.3_262, 1.6_258, -2.1_856, -1.3_267, 2.8_395,
2.3_779, -1.0_623, -1.2_468, 0.8_959, 3.3_367, 3.2_243
])
__UpperCamelCase = torch.tensor([
-2.0_628, -2.7_667, -0.2_089, -0.8_263, 2.0_539, 0.5_992, 0.6_495, -3.8_336,
1.6_025, -3.2_817, 0.1_721, -0.0_633, 1.7_516, 2.7_039, 0.8_100, -0.5_908,
-3.2_113, -4.4_343, 2.9_257, 1.3_632, 1.5_562, -2.1_489, -1.9_894, 3.0_560,
3.3_396, -0.7_328, -1.0_417, 0.0_383, 3.7_093, 3.2_343
])
__UpperCamelCase = torch.tensor([
-1.4_574, -2.0_569, -0.0_473, -0.6_117, 1.4_018, 0.5_769, 0.4_129, -2.7_344,
1.2_241, -2.1_397, 0.2_000, 0.3_937, 0.7_616, 2.0_453, 0.7_324, -0.3_391,
-2.1_746, -2.7_744, 1.6_963, 0.6_921, 1.2_187, -1.6_172, -0.8_877, 2.2_439,
1.8_471, -0.5_839, -0.5_605, -0.0_464, 2.3_250, 2.1_219
])
# fmt: on
models = api.list_models(filter='''diffusers''')
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        local_checkpoint = '''/home/patrick/google_checkpoints/''' + mod.modelId.split('''/''')[-1]
print(f'''Started running {mod.modelId}!!!''')
if mod.modelId.startswith('''CompVis'''):
            model = UNetaDModel.from_pretrained(local_checkpoint, subfolder='''unet''')
else:
            model = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            logits = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results['''_'''.join('''_'''.join(mod.modelId.split('''/''')).split('''-'''))], atol=1E-3
)
print(f'''{mod.modelId} has passed successfully!!!''')
| 113
| 0
|
'''simple docstring'''
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class lowercase__ ( lowercase ):
lowercase__ = (DPMSolverSDEScheduler,)
lowercase__ = 10
def UpperCamelCase_ ( self : Dict ,**lowerCamelCase__ : Any ):
'''simple docstring'''
        config = {
'num_train_timesteps': 1100,
'beta_start': 0.0_0_0_1,
'beta_end': 0.0_2,
'beta_schedule': 'linear',
'noise_sampler_seed': 0,
}
        config.update(**lowerCamelCase__ )
return config
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] ,[0.0_0_0_2, 0.0_0_2, 0.0_2] ):
            self.check_over_configs(beta_start=beta_start ,beta_end=beta_end )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample ,t )
            model_output = model(sample ,t )
            output = scheduler.step(model_output ,t ,sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_6_7.4_7_8_2_1_0_4_4_9_2_1_8_7_5 ) < 1E-2
assert abs(result_mean.item() - 0.2_1_7_8_7_0_5_9_6_4_5_6_5_2_7_7 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_7_1.5_9_3_5_2_1_1_1_8_1_6_4_0_6 ) < 1E-2
assert abs(result_mean.item() - 0.2_2_3_4_2_9_0_6_8_9_2_2_9_9_6_5_2 ) < 1E-3
else:
assert abs(result_sum.item() - 1_6_2.5_2_3_8_3_4_2_2_8_5_1_5_6_2 ) < 1E-2
assert abs(result_mean.item() - 0.2_1_1_6_1_9_5_7_0_8_5_1_3_2_6 ) < 1E-3
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type='v_prediction' )
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample ,t )
            model_output = model(sample ,t )
            output = scheduler.step(model_output ,t ,sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_2_4.7_7_1_4_9_2_0_0_4_3_9_4_5_3 ) < 1E-2
assert abs(result_mean.item() - 0.1_6_2_2_6_2_8_9_0_1_4_8_1_6_2_8_4 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_2_8.1_6_6_3_3_6_0_5_9_5_7_0_3 ) < 1E-2
assert abs(result_mean.item() - 0.1_6_6_8_8_3_2_6_0_0_1_1_6_7_2_9_7 ) < 1E-3
else:
assert abs(result_sum.item() - 1_1_9.8_4_8_7_5_4_8_8_2_8_1_2_5 ) < 1E-2
assert abs(result_mean.item() - 0.1_5_6_0_5_3_0_6_6_2_5_3_6_6_2_1 ) < 1E-3
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps ,device=torch_device )
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device ) * scheduler.init_noise_sigma
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample ,t )
            model_output = model(sample ,t )
            output = scheduler.step(model_output ,t ,sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_6_7.4_6_9_5_7_3_9_7_4_6_0_9_3_8 ) < 1E-2
assert abs(result_mean.item() - 0.2_1_8_0_5_9_3_4_6_0_7_9_8_2_6_3_5 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_7_1.5_9_3_5_3_6_3_7_6_9_5_3_1_2 ) < 1E-2
assert abs(result_mean.item() - 0.2_2_3_4_2_9_0_8_3_8_2_4_1_5_7_7_1 ) < 1E-3
else:
assert abs(result_sum.item() - 1_6_2.5_2_3_8_3_4_2_2_8_5_1_5_6_2 ) < 1E-2
assert abs(result_mean.item() - 0.2_1_1_6_1_9_5_7_0_8_5_1_3_2_6 ) < 1E-3
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config ,use_karras_sigmas=True )
        scheduler.set_timesteps(self.num_inference_steps ,device=torch_device )
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device ) * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample ,t )
            model_output = model(sample ,t )
            output = scheduler.step(model_output ,t ,sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_7_6.6_6_9_7_4_1_3_5_7_4_2_1_8_8 ) < 1E-2
assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1E-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_7_7.6_3_6_5_3_5_6_4_4_5_3_1_2_5 ) < 1E-2
assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1E-2
else:
assert abs(result_sum.item() - 1_7_0.3_1_3_5_2_2_3_3_8_8_6_7_2 ) < 1E-2
assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1E-2
| 355
|
'''simple docstring'''
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
snake_case_ : List[str] = '\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n'
snake_case_ : Tuple = '\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper "Evaluating Large Language Models Trained on Code"\n(https://arxiv.org/abs/2107.03374).\n'
snake_case_ : str = '\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidates should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric("code_eval")\n >>> test_cases = ["assert add(2,3)==5"]\n >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {\'pass@1\': 0.5, \'pass@2\': 1.0}\n'
snake_case_ : str = '\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe "code_eval" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper "Evaluating Large\nLanguage Models Trained on Code" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"\n\n################################################################################\\n'
snake_case_ : List[Any] = 'The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the "Software"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase__ ( datasets.Metric ):
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' ) ),
'references': datasets.Value('string' ),
} ) ,homepage='https://github.com/openai/human-eval' ,codebase_urls=['https://github.com/openai/human-eval'] ,reference_urls=['https://github.com/openai/human-eval'] ,license=_LICENSE ,)
    def UpperCamelCase_ ( self : str ,predictions : Dict ,references : Dict ,k : List[Any]=[1, 10, 100] ,num_workers : int=4 ,timeout : List[Any]=3.0 ):
        '''simple docstring'''
        if os.getenv('HF_ALLOW_CODE_EVAL' ,0 ) != "1":
            raise ValueError(_WARNING )
        if os.name == "nt":
            raise NotImplementedError('This metric is currently not supported on Windows.' )
        with ThreadPoolExecutor(max_workers=num_workers ) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list )
            for task_id, (candidates, test_case) in enumerate(zip(predictions ,references ) ):
                for candidate in candidates:
                    test_program = candidate + '\n' + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness ,*args )
                    futures.append(future )
                    completion_id[task_id] += 1
                    n_samples += 1
            for future in as_completed(futures ):
                result = future.result()
                results[result["task_id"]].append((result['completion_id'], result) )
        total , correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]['passed'] for r in result]
            total.append(len(passed ) )
            correct.append(sum(passed ) )
        total = np.array(total )
        correct = np.array(correct )
        ks = k
        pass_at_k = {F'pass@{k}': estimate_pass_at_k(total ,correct ,k ).mean() for k in ks if (total >= k).all()}
        return pass_at_k, results
def estimate_pass_at_k( num_samples , num_correct , k ):
    def estimator(n , c , k ) -> float:
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) )
    if isinstance(num_samples , int ):
        num_samples_it = itertools.repeat(num_samples , len(num_correct ) )
    else:
        assert len(num_samples ) == len(num_correct )
        num_samples_it = iter(num_samples )
    return np.array([estimator(int(n ) , int(c ) , k ) for n, c in zip(num_samples_it , num_correct )] )
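# Worked example for the estimator above (pure arithmetic, nothing external): with
# n = 5 samples, c = 2 correct and k = 2, pass@2 = 1 - C(3, 2) / C(5, 2) = 1 - 3/10 = 0.7,
# which matches 1.0 - np.prod(1.0 - 2 / np.arange(4, 6)) = 1 - (1/2) * (3/5) = 0.7.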
| 236
| 0
|
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
_lowercase : Tuple =logging.get_logger(__name__)
_lowercase : Optional[Any] ={
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"encoder.layer_norm_for_extract": "layer_norm_for_extract",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"label_embs_concat": "label_embeddings_concat",
"mask_emb": "masked_spec_embed",
"spk_proj": "speaker_proj",
}
_lowercase : List[str] =[
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"label_embeddings_concat",
"speaker_proj",
"layer_norm_for_extract",
]
def set_recursively( hf_pointer , key , value , full_name , weight_type) -> Dict:
    """simple docstring"""
    for attribute in key.split("""."""):
        hf_pointer = getattr(hf_pointer , attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
            F''' {value.shape} for {full_name}''')
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''')
def recursively_load_weights( fairseq_model , hf_model) -> Tuple:
    """simple docstring"""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == """group""" , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = """unispeech_sat.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("""w2v_model.""")[-1] == name.split(""".""")[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split(""".""")[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(""".""")[-2]
                        mapped_key = mapped_key.replace("""*""" , layer_index)
                    if "weight_g" in name:
                        weight_type = """weight_g"""
                    elif "weight_v" in name:
                        weight_type = """weight_v"""
                    elif "bias" in name:
                        weight_type = """bias"""
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = """weight"""
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(F'''Unused weights: {unused_weights}''')
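# For example, a fairseq key such as "encoder.layers.3.fc1.weight" matches the "fc1"
# entry in MAPPING, the layer index "3" is recovered from the part of the name before
# the match, and "*" in "encoder.layers.*.feed_forward.intermediate_dense" is replaced
# with it before set_recursively() walks the HF module tree attribute by attribute.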
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm) -> Optional[int]:
    """simple docstring"""
    name = full_name.split("""conv_layers.""")[-1]
    items = name.split(""".""")
    layer_id = int(items[0])
    type_id = int(items[1])
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''')
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''')
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.''')
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''')
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_sat_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True) -> Union[str, Any]:
    """simple docstring"""
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()
    dict_path = """"""
    if is_finetuned:
        hf_wavavec = UniSpeechSatForCTC(config)
    else:
        hf_wavavec = UniSpeechSatForPreTraining(config)
    model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""")[:-1])})
    model = model[0].eval()
    recursively_load_weights(model , hf_wavavec)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_lowercase : Optional[int] =argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
_lowercase : List[str] =parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
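# Example invocation (the script filename and all paths below are placeholders):
#   python convert_unispeech_sat_checkpoint.py \
#       --checkpoint_path /path/to/fairseq_checkpoint.pt \
#       --config_path /path/to/config.json \
#       --pytorch_dump_folder_path ./unispeech-sat-hf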
| 170
|
DIGITS_SQUARED =[sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_0000)]
def next_number( number : int) -> int:
    """simple docstring"""
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000]
        number //= 10_0000
    return sum_of_digits_squared
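# Sanity check for next_number(): next_number(12) == 1**2 + 2**2 == 5 via the
# precomputed DIGITS_SQUARED table, and 44 -> 32 -> 13 -> 10 -> 1, i.e. 44 sits on
# the chain that terminates in 1.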
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS : list[bool | None] =[None] * 1000_0000
CHAINS[0] = True
CHAINS[57] = False
def chain( number : int) -> bool:
    """simple docstring"""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain
    while number < 1000_0000:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain
def solution( number : int = 1000_0000) -> int:
    """simple docstring"""
    for i in range(1 , number):
        if CHAINS[i] is None:
            chain(i + 1)
    return CHAINS[:number].count(False)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'{solution() = }')
| 170
| 1
|
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
__A = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', f'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', f'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', f'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', f'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.weight''', f'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', f'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', f'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', f'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.weight''', f'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', f'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', f'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', f'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', f'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', f'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.bias''', f'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', f'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', f'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', f'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.bias''', f'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', f'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
]
)
def rename_key( state_dict , old , new ) -> str:
    val = state_dict.pop(old )
    state_dict[new] = val
def rename_backbone_keys( state_dict ) -> Optional[int]:
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace('''backbone.0.body''' , '''backbone.conv_encoder.model''' )
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v( state_dict , is_panoptic=False ) -> Tuple:
    prefix = ''''''
    if is_panoptic:
        prefix = '''conditional_detr.'''
    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
        in_proj_bias = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""encoder.layers.{i}.self_attn.q_proj.weight"""] = in_proj_weight[:2_5_6, :]
        state_dict[F"""encoder.layers.{i}.self_attn.q_proj.bias"""] = in_proj_bias[:2_5_6]
        state_dict[F"""encoder.layers.{i}.self_attn.k_proj.weight"""] = in_proj_weight[2_5_6:5_1_2, :]
        state_dict[F"""encoder.layers.{i}.self_attn.k_proj.bias"""] = in_proj_bias[2_5_6:5_1_2]
        state_dict[F"""encoder.layers.{i}.self_attn.v_proj.weight"""] = in_proj_weight[-2_5_6:, :]
        state_dict[F"""encoder.layers.{i}.self_attn.v_proj.bias"""] = in_proj_bias[-2_5_6:]
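# The fused in_proj matrix popped above is (3 * 256) x 256 for this model's 256-dim
# hidden size: rows 0-255 hold the query projection, rows 256-511 the key projection,
# and the last 256 rows the value projection, hence the three slices in that order.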
def prepare_img( ) -> List[str]:
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint( model_name , pytorch_dump_folder_path ) -> Optional[Any]:
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = '''resnet101'''
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = '''panoptic''' in model_name
    if is_panoptic:
        config.num_labels = 2_5_0
    else:
        config.num_labels = 9_1
        repo_id = '''huggingface/label-files'''
        filename = '''coco-detection-id2label.json'''
        idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
        idalabel = {int(k ): v for k, v in idalabel.items()}
lowercase__: List[str] = idalabel
lowercase__: Optional[int] = {v: k for k, v in idalabel.items()}
# load image processor
    format = '''coco_panoptic''' if is_panoptic else '''coco_detection'''
    image_processor = ConditionalDetrImageProcessor(format=format )
    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img , return_tensors='''pt''' )
    pixel_values = encoding['''pixel_values''']
logger.info(F"""Converting model {model_name}...""" )
# load original model from torch hub
    conditional_detr = torch.hub.load('''DeppMeng/ConditionalDETR''' , model_name , pretrained=True ).eval()
    state_dict = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
            src = '''conditional_detr.''' + src
        rename_key(state_dict , src , dest )
    state_dict = rename_backbone_keys(state_dict )
# query, key and value matrices need special treatment
    read_in_q_k_v(state_dict , is_panoptic=is_panoptic )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
lowercase__: Dict = '''conditional_detr.model.''' if is_panoptic else '''model.'''
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith('''conditional_detr''' )
and not key.startswith('''class_labels_classifier''' )
and not key.startswith('''bbox_predictor''' )
):
lowercase__: str = state_dict.pop(__UpperCAmelCase )
lowercase__: Union[str, Any] = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
lowercase__: str = state_dict.pop(__UpperCAmelCase )
lowercase__: Optional[int] = val
elif key.startswith('''bbox_attention''' ) or key.startswith('''mask_head''' ):
continue
else:
lowercase__: Any = state_dict.pop(__UpperCAmelCase )
lowercase__: List[Any] = val
else:
if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ):
lowercase__: str = state_dict.pop(__UpperCAmelCase )
lowercase__: Dict = val
# finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config ) if is_panoptic else ConditionalDetrForObjectDetection(config )
    model.load_state_dict(state_dict )
    model.eval()
    model.push_to_hub(repo_id=model_name , organization='''DepuMeng''' , commit_message='''Add model''' )
    # verify our conversion
    original_outputs = conditional_detr(pixel_values )
    outputs = model(pixel_values )
assert torch.allclose(outputs.logits , original_outputs['''pred_logits'''] , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs['''pred_boxes'''] , atol=1e-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs['''pred_masks'''] , atol=1e-4 )
# Save model and image processor
logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="conditional_detr_resnet50",
type=str,
help="Name of the CONDITIONAL_DETR model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
__A = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
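# Example invocation (the script filename and output path are placeholders):
#   python convert_conditional_detr_checkpoint.py \
#       --model_name conditional_detr_resnet50 \
#       --pytorch_dump_folder_path ./conditional-detr-resnet-50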
| 2
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
__A = logging.get_logger(__name__)
__A = {
"bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json",
"bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json",
"bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json",
"bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json",
"bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json",
"bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json",
}
class UpperCAmelCase (PretrainedConfig ):
"""simple docstring"""
_UpperCAmelCase :str = "bloom"
_UpperCAmelCase :List[str] = ["past_key_values"]
_UpperCAmelCase :Optional[Any] = {
"num_hidden_layers": "n_layer",
"num_attention_heads": "n_head",
}
def __init__( self , _UpperCAmelCase=250880 , _UpperCAmelCase=64 , _UpperCAmelCase=2 , _UpperCAmelCase=8 , _UpperCAmelCase=1e-5 , _UpperCAmelCase=0.02 , _UpperCAmelCase=True , _UpperCAmelCase=1 , _UpperCAmelCase=2 , _UpperCAmelCase=False , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=1 , _UpperCAmelCase=False , **_UpperCAmelCase , ):
lowercase__: Any = vocab_size
# Backward compatibility with n_embed kwarg
lowercase__: Optional[Any] = kwargs.pop('''n_embed''' , _UpperCAmelCase )
lowercase__: int = hidden_size if n_embed is None else n_embed
lowercase__: int = n_layer
lowercase__: int = n_head
lowercase__: Optional[Any] = layer_norm_epsilon
lowercase__: int = initializer_range
lowercase__: List[Any] = use_cache
lowercase__: str = pretraining_tp
lowercase__: Tuple = apply_residual_connection_post_layernorm
lowercase__: int = hidden_dropout
lowercase__: Optional[Any] = attention_dropout
lowercase__: int = bos_token_id
lowercase__: Union[str, Any] = eos_token_id
lowercase__: Any = slow_but_exact
super().__init__(bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase )

class BloomOnnxConfig(OnnxConfigWithPast):
    torch_onnx_minimum_version = version.parse("1.12")

    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs, direction="inputs", inverted_values_shape=True)
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    @property
    def atol_for_validation(self) -> float:
        return 1e-3

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizer",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_key_shape), torch.zeros(past_value_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
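

# Minimal usage sketch for the ONNX config above (an illustration, not part of the original
# file; the tokenizer checkpoint name is only an example):
#
#   from transformers import AutoTokenizer
#   config = BloomConfig()
#   onnx_config = BloomOnnxConfig(config, task="default", use_past=True)
#   tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-560m")
#   dummy = onnx_config.generate_dummy_inputs(tokenizer, batch_size=2, seq_length=8)
#   # dummy contains input_ids, one (key, value) pair of zeros per layer, and attention_mask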
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class TFBenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_only_pretrain_model(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, only_pretrain_model=True,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "patrickvonplaten/t5-tiny-random"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU")) == 0, "Cannot do xla on CPU.")
    def test_inference_no_configs_xla(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], use_xla=True, multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID], inference=True, save_to_csv=True, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"), inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"), env_info_csv_file=os.path.join(tmp_dir, "env.csv"), multi_process=False,
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID], inference=True, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(tmp_dir, "log.txt"), log_print=True, trace_memory_line_by_line=True, eager_mode=True, multi_process=False,
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
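

# These benchmark tests can be run in the usual way, e.g. (a sketch; the test file path may
# differ between transformers versions):
#
#   python -m pytest tests/benchmark/test_benchmark_tf.py -k "inference_no_configs" -v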
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPT2LMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow

if is_tf_available():
    import tensorflow as tf

if is_keras_nlp_available():
    from transformers.models.gpt2 import TFGPT2Tokenizer


TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"

if is_tf_available():

    class ModelToSave(tf.Module):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPT2LMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def serving(self, text):
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]
            return outputs

@require_tf
@require_keras_nlp
class GPT2TokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))
    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()
                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output))

    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPT2Tokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)

            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))

    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # pad_token_id must be set for the test to run
            tf_tokenizer.pad_token_id = 123123

            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)

                out_length = out["input_ids"].numpy().shape[1]

                assert out_length == max_length
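

# These tokenizer-equivalence tests can be run directly, e.g. (a sketch; the test module
# path may differ between transformers versions):
#
#   python -m pytest tests/models/gpt2/test_tokenization_gpt2_tf.py -v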
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
# See all SEW models at https://huggingface.co/models?filter=sew
}


class SEWConfig(PretrainedConfig):
    model_type = "sew"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
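

# Sketch of what `inputs_to_logits_ratio` evaluates to with the default strides:
# functools.reduce(operator.mul, (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), 1) == 5 * 2**6 == 320,
# i.e. one encoder frame per 320 raw samples (20 ms of audio at a 16 kHz sampling rate).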
'''simple docstring'''
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz

    def __init__(
        self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs
    ):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(self, path: str, mode: str = "rb", block_size=None, autocommit=True, cache_options=None, **kwargs):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()


class Bz2FileSystem(BaseCompressedFileFileSystem):
    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedFileFileSystem):
    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(
        self,
        fo: str,
        mode: str = "rb",
        target_protocol: Optional[str] = None,
        target_options: Optional[dict] = None,
        block_size: int = DEFAULT_BLOCK_SIZE,
        **kwargs,
    ):
        super().__init__(
            fo=fo,
            mode=mode,
            target_protocol=target_protocol,
            target_options=target_options,
            block_size=block_size,
            **kwargs,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
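

# Minimal usage sketch (assumes these filesystems are registered with fsspec under their
# `protocol` names, as `datasets` does on import; the URL below is a placeholder):
#
#   import fsspec
#   with fsspec.open("gzip://data.txt::https://example.com/data.txt.gz", "rb") as f:
#       raw = f.read()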
"""simple docstring"""
def bfs(graph, source, sink, parent):
    """Return True if there is an augmenting path from source to sink, recording the path in `parent`."""
    visited = [False] * len(graph)
    queue = []
    queue.append(source)
    visited[source] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[sink]


def ford_fulkerson(graph, source, sink):
    """Compute the maximum flow from source to sink; BFS is used to find augmenting paths (Edmonds-Karp)."""
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink

        while s != source:
            # Find the minimum residual capacity along the selected path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        v = sink

        # update residual capacities of the edges along the path
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    return max_flow
graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
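
# For this classic example network (the max-flow graph used in CLRS), ford_fulkerson(graph, 0, 5)
# returns 23, so the script prints 23.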
def twos_complement(number: int) -> str:
    """
    Return the two's complement representation of a negative integer as a binary string.

    >>> twos_complement(0)
    '0b0'
    >>> twos_complement(-1)
    '0b11'
    >>> twos_complement(-5)
    '0b1011'
    >>> twos_complement(1)
    Traceback (most recent call last):
        ...
    ValueError: input must be a negative integer
    """
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    HubertConfig,
    HubertForCTC,
    HubertModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "hubert." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or (key.split("w2v_model.")[-1] == name.split(".")[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_hubert_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path)
    else:
        config = HubertConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = HubertForCTC(config)
    else:
        hf_wav2vec = HubertModel(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
    args = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
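# Example invocation (a sketch; all paths are placeholders):
#
#   python convert_hubert_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path ./hubert_base_ls960.pt \
#       --pytorch_dump_folder_path ./hubert-base-converted \
#       --not_finetuned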
'''simple docstring'''
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"{bindir}/../../examples/pytorch/translation"):
    from run_translation import main  # noqa


set_seed(42)
MARIAN_MODEL = "sshleifer/student_marian_en_ro_6_1"
MBART_TINY = "sshleifer/tiny-mbart"
@require_torch
class TestTrainerExt(TestCasePlus):
    def run_seq2seq_quick(
        self,
        distributed=False,
        extra_args_str=None,
        predict_with_generate=True,
        do_train=True,
        do_eval=True,
        do_predict=True,
    ):
        output_dir = self.run_trainer(
            eval_steps=1,
            max_len=12,
            model_name=MBART_TINY,
            num_train_epochs=1,
            distributed=distributed,
            extra_args_str=extra_args_str,
            predict_with_generate=predict_with_generate,
            do_train=do_train,
            do_eval=do_eval,
            do_predict=do_predict,
        )
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history

        if not do_eval:
            return

        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]

        first_step_stats = eval_metrics[0]
        if predict_with_generate:
            assert "eval_bleu" in first_step_stats

            last_step_stats = eval_metrics[-1]
            assert isinstance(last_step_stats["eval_bleu"], float)
            assert not math.isnan(float(last_step_stats["eval_loss"])), "eval_loss must not be `nan`"
    @require_torch_non_multi_gpu
    def test_run_seq2seq_no_dist(self):
        self.run_seq2seq_quick()

    @require_torch_multi_gpu
    def test_run_seq2seq_dp(self):
        self.run_seq2seq_quick(distributed=False)

    @require_torch_multi_gpu
    def test_run_seq2seq_ddp(self):
        self.run_seq2seq_quick(distributed=True)

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_sharded_ddp(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str="--sharded_ddp simple")

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_sharded_ddp_fp16(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str="--sharded_ddp simple --fp16")

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_fully_sharded_ddp(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str="--sharded_ddp zero_dp_2", predict_with_generate=False)

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_fully_sharded_ddp_fp16(self):
        self.run_seq2seq_quick(
            distributed=True, extra_args_str="--sharded_ddp zero_dp_2 --fp16", predict_with_generate=False
        )
    @require_apex
    @require_torch_gpu
    def test_run_seq2seq_apex(self):
        # XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
        # program and it breaks other tests that run from the same pytest worker, therefore until this is
        # sorted out it must be run only in an external program, that is distributed=True in this
        # test and only under one or more gpus - if we want cpu will need to make a special test
        #
        # specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
        # 2nd main() call it botches the future eval.
        #
        self.run_seq2seq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")
        # test 2nd time - was getting eval_loss': nan'
        # to reproduce the problem set distributed=False
        self.run_seq2seq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")
    @parameterized.expand(["base", "low", "high", "mixed"])
    @require_torch_multi_gpu
    def test_trainer_log_level_replica(self, experiment_id):
        # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
        experiments = {
            # test with the default log_level - should be info and thus log info once
            "base": {"extra_args_str": "", "n_matches": 1},
            # test with low log_level and log_level_replica - should be noisy on all processes
            # now the info string should appear twice on 2 processes
            "low": {"extra_args_str": "--log_level debug --log_level_replica debug", "n_matches": 2},
            # test with high log_level and low log_level_replica
            # now the info string should appear once only on the replica
            "high": {"extra_args_str": "--log_level error --log_level_replica debug", "n_matches": 1},
            # test with high log_level and log_level_replica - should be quiet on all processes
            "mixed": {"extra_args_str": "--log_level error --log_level_replica error", "n_matches": 0},
        }

        data = experiments[experiment_id]
        kwargs = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False}
        log_info_string = "Running training"
        with CaptureStderr() as cl:
            self.run_seq2seq_quick(**kwargs, extra_args_str=data["extra_args_str"])
        n_matches = len(re.findall(log_info_string, cl.err))
        self.assertEqual(n_matches, data["n_matches"])
    @slow
    def test_run_seq2seq(self):
        output_dir = self.run_trainer(
            eval_steps=2,
            max_len=128,
            model_name=MARIAN_MODEL,
            learning_rate=3e-4,
            num_train_epochs=10,
            distributed=False,
        )

        # Check metrics
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history
        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]
        first_step_stats = eval_metrics[0]
        last_step_stats = eval_metrics[-1]

        assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats["eval_bleu"], float)

        # test if do_predict saves generations and metrics
        contents = os.listdir(output_dir)
        contents = {os.path.basename(p) for p in contents}
        assert "generated_predictions.txt" in contents
        assert "predict_results.json" in contents
    @slow
    @require_bitsandbytes
    def test_run_seq2seq_bnb(self):
        from transformers.training_args import OptimizerNames

        def train_and_return_metrics(optim: str) -> Tuple[int, float]:
            extra_args = "--skip_memory_metrics 0"

            output_dir = self.run_trainer(
                max_len=128,
                model_name=MARIAN_MODEL,
                learning_rate=3e-4,
                num_train_epochs=1,
                optim=optim,
                distributed=True,
                extra_args_str=extra_args,
                do_eval=False,
                do_predict=False,
                n_gpus_to_use=1,
            )

            # Check metrics
            logs = TrainerState.load_from_json(Path(output_dir, "trainer_state.json")).log_history
            gpu_peak_mem_mb = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20)
            gpu_alloc_mem_mb = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20)
            loss = logs[0]["train_loss"]
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss

        gpu_peak_mem_orig, gpu_alloc_mem_orig, loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value)
        gpu_peak_mem_bnb, gpu_alloc_mem_bnb, loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value)

        gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb

        gpu_total_mem_orig = gpu_peak_mem_orig + gpu_alloc_mem_orig
        gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb

        # sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
        # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
        # in 2 bytes and the diff in optim memory usage is derived as so:
        #
        # - normal 25*8=~200MB (8 bytes per param)
        # - bnb    25*2= ~50MB (2 bytes per param)
        #
        # Thus we should expect ~150MB total memory saved.
        #
        # Peak memory should be the same - the total should be different by about that same margin
        #
        # After leaving a small margin to accommodate for differences between gpus let's check
        # that we have at least 120MB in savings
        expected_savings = 120

        # uncomment the following if this test starts failing - requires py38 for a new print feature
        # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
        # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
        # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
        # print(f"{gpu_alloc_mem_diff=}MB")
        # print(f"{gpu_peak_mem_diff=}MB")
        # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
        # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")

        self.assertGreater(
            gpu_alloc_mem_diff,
            expected_savings,
            "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"
            f" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB",
        )

        self.assertGreater(
            gpu_total_mem_diff,
            expected_savings,
            "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"
            f" gpu_total_mem_bnb={gpu_total_mem_bnb}MB",
        )

        self.assertEqual(
            loss_orig, loss_bnb, f"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}"
        )
    def run_trainer(
        self,
        max_len: int,
        model_name: str,
        num_train_epochs: int,
        learning_rate: float = 3e-3,
        optim: str = "adafactor",
        distributed: bool = False,
        extra_args_str: str = None,
        eval_steps: int = 0,
        predict_with_generate: bool = True,
        do_train: bool = True,
        do_eval: bool = True,
        do_predict: bool = True,
        n_gpus_to_use: int = None,
    ):
        data_dir = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro"
        output_dir = self.get_auto_remove_tmp_dir()
        args_train = f"""
            --model_name_or_path {model_name}
            --train_file {data_dir}/train.json
            --validation_file {data_dir}/val.json
            --test_file {data_dir}/test.json
            --output_dir {output_dir}
            --overwrite_output_dir
            --max_train_samples 8
            --max_source_length {max_len}
            --max_target_length {max_len}
            --do_train
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 4
            --learning_rate {learning_rate}
            --warmup_steps 8
            --logging_steps 0
            --logging_strategy no
            --save_steps {str(eval_steps)}
            --group_by_length
            --label_smoothing_factor 0.1
            --target_lang ro_RO
            --source_lang en_XX
        """.split()
        args_eval = f"""
            --do_eval
            --per_device_eval_batch_size 4
            --max_eval_samples 8
            --val_max_target_length {max_len}
            --evaluation_strategy steps
            --eval_steps {str(eval_steps)}
        """.split()
        args_predict = """
            --do_predict
        """.split()

        args = []
        if do_train:
            args += args_train

        if do_eval:
            args += args_eval

        if do_predict:
            args += args_predict

        if predict_with_generate:
            args += "--predict_with_generate".split()

        if do_train:
            if optim == "adafactor":
                args += "--adafactor".split()
            else:
                args += f"--optim {optim}".split()

        if extra_args_str is not None:
            args += extra_args_str.split()

        if distributed:
            if n_gpus_to_use is None:
                n_gpus_to_use = get_gpu_count()
            master_port = get_torch_dist_unique_port()
            distributed_args = f"""
                -m torch.distributed.run
                --nproc_per_node={n_gpus_to_use}
                --master_port={master_port}
                {self.examples_dir_str}/pytorch/translation/run_translation.py
            """.split()
            cmd = [sys.executable] + distributed_args + args
            # keep for quick debug
            # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
            execute_subprocess_async(cmd, env=self.get_env())
        else:
            testargs = ["run_translation.py"] + args
            with patch.object(sys, "argv", testargs):
                main()

        return output_dir
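

# Any single scenario above can be exercised on its own via pytest, e.g. (a sketch; the
# test module path varies across transformers versions, and slow tests need RUN_SLOW=1):
#
#   RUN_SLOW=1 python -m pytest tests/extended/test_trainer_ext.py -k "bnb" -v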
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
class __lowerCAmelCase ( lowerCAmelCase):
_a = '''whisper'''
_a = ['''past_key_values''']
_a = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
    def __init__(
        self,
        vocab_size=51865,
        num_mel_bins=80,
        encoder_layers=6,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_attention_heads=4,
        decoder_ffn_dim=1536,
        encoder_ffn_dim=1536,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        decoder_start_token_id=50257,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=256,
        dropout=0.0,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        scale_embedding=False,
        max_source_positions=1500,
        max_target_positions=448,
        pad_token_id=50256,
        bos_token_id=50256,
        eos_token_id=50256,
        suppress_tokens=None,
        begin_suppress_tokens=[220, 50256],
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        apply_spec_augment=False,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        median_filter_width=7,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions

        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )
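# Editor's sketch (assumption: the class above behaves like transformers'
# WhisperConfig): a quick check that `attribute_map` routes the generic config
# names to Whisper's own attribute names. `_demo_whisper_config` is a
# hypothetical helper, not part of the original file.
def _demo_whisper_config():
    config = WhisperConfig()
    assert config.hidden_size == config.d_model  # resolved via attribute_map
    assert config.num_attention_heads == config.encoder_attention_heads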
class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
@property
    def inputs(self):
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    def generate_dummy_inputs(
        self,
        preprocessor,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework=None,
        sampling_rate: int = 22050,
        time_duration: float = 5.0,
        frequency: int = 220,
    ):
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )
        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")
        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")
        return dummy_inputs

@property
    def atol_for_validation(self) -> float:
        return 1e-3
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"allenai/led-base-16384": 1_6_3_8_4,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 byte values to unicode strings, avoiding the
    whitespace/control characters that the BPE code cannot handle.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
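# Editor's demo (`_demo_bytes_to_unicode` is a hypothetical helper, not in the
# original file): the mapping covers all 256 byte values and sends unprintable
# bytes into the 256+ codepoint range, e.g. the space byte 0x20 maps to "Ġ".
def _demo_bytes_to_unicode():
    mapping = bytes_to_unicode()
    assert len(mapping) == 256
    assert mapping[ord(" ")] == chr(256 + 32)  # "Ġ" (U+0120)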
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
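# Editor's demo (`_demo_get_pairs` is a hypothetical helper): adjacent symbol
# pairs for a word tuple.
def _demo_get_pairs():
    assert get_pairs(("h", "e", "l", "l", "o")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}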
class LEDTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # pick the merge with the lowest rank among the current pairs
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
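    # Editor's worked example for the merge loop above (comment only, hedged):
    # for word = ("h", "e", "l", "l", "o") and bigram = ("l", "l"), the inner loop
    # scans for "l" starting at i, copies everything before it, and emits the
    # merged symbol "ll", yielding ("h", "e", "ll", "o") for the next iteration.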
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens
    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
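    # Editor's note (hedged): for a pair of sequences this produces the
    # RoBERTa-style layout `<s> A </s></s> B </s>`; with hypothetical ids cls=0,
    # sep=2, A=[10, 11], B=[20] the result is [0, 10, 11, 2, 2, 20, 2].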
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
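# Editor's demo (`_demo_global_attention_padding` is a hypothetical helper): the
# `-1` padding rule for `global_attention_mask` used in `_pad` above, isolated
# on plain lists.
def _demo_global_attention_padding():
    mask, difference = [0, 1, 0], 2
    assert mask + [-1] * difference == [0, 1, 0, -1, -1]  # right padding
    assert [-1] * difference + mask == [-1, -1, 0, 1, 0]  # left padding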
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_conditional_detr": [
"CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ConditionalDetrConfig",
"ConditionalDetrOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_conditional_detr"] = ["ConditionalDetrFeatureExtractor"]
    _import_structure["image_processing_conditional_detr"] = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_conditional_detr"] = [
"CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConditionalDetrForObjectDetection",
"ConditionalDetrForSegmentation",
"ConditionalDetrModel",
"ConditionalDetrPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
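# Editor's note (hedged): with the _LazyModule pattern above, a statement such as
#   from transformers.models.conditional_detr import ConditionalDetrConfig
# only imports `configuration_conditional_detr` at first attribute access, so the
# package stays cheap to import when the torch/vision extras are not needed.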
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", f"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", f"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm1.weight""", f"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.bias""", f"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.weight""", f"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", f"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.cross_attn.out_proj.weight""",
f"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.cross_attn.out_proj.bias""",
f"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm1.weight""", f"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.bias""", f"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.weight""", f"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.bias""", f"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.weight""", f"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""decoder.layers.{i}.final_layer_norm.bias"""))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_qcontent_proj.weight""", f"""decoder.layers.{i}.sa_qcontent_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_kcontent_proj.weight""", f"""decoder.layers.{i}.sa_kcontent_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_qpos_proj.weight""", f"""decoder.layers.{i}.sa_qpos_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_kpos_proj.weight""", f"""decoder.layers.{i}.sa_kpos_proj.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_v_proj.weight""", f"""decoder.layers.{i}.sa_v_proj.weight"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qcontent_proj.weight""", f"""decoder.layers.{i}.ca_qcontent_proj.weight""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_kcontent_proj.weight""", f"""decoder.layers.{i}.ca_kcontent_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_kpos_proj.weight""", f"""decoder.layers.{i}.ca_kpos_proj.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.ca_v_proj.weight""", f"""decoder.layers.{i}.ca_v_proj.weight"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight""", f"""decoder.layers.{i}.ca_qpos_sine_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_qcontent_proj.bias""", f"""decoder.layers.{i}.sa_qcontent_proj.bias""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_kcontent_proj.bias""", f"""decoder.layers.{i}.sa_kcontent_proj.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_qpos_proj.bias""", f"""decoder.layers.{i}.sa_qpos_proj.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_kpos_proj.bias""", f"""decoder.layers.{i}.sa_kpos_proj.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_v_proj.bias""", f"""decoder.layers.{i}.sa_v_proj.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qcontent_proj.bias""", f"""decoder.layers.{i}.ca_qcontent_proj.bias""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_kcontent_proj.bias""", f"""decoder.layers.{i}.ca_kcontent_proj.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.ca_kpos_proj.bias""", f"""decoder.layers.{i}.ca_kpos_proj.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.ca_v_proj.bias""", f"""decoder.layers.{i}.ca_v_proj.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias""", f"""decoder.layers.{i}.ca_qpos_sine_proj.bias""")
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'),
('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'),
('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'),
('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'),
('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'),
('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'),
('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'),
('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'),
('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'),
('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias'),
]
)
def rename_key(state_dict, old, new):
    """Pop `old` from the state dict and re-insert its value under `new`."""
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
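# Editor's demo (`_demo_rename` is a hypothetical helper): how the two renaming
# utilities above act on a toy state dict.
def _demo_rename():
    sd = {"backbone.0.body.conv1.weight": 1, "input_proj.weight": 2}
    rename_key(sd, "input_proj.weight", "input_projection.weight")
    sd = rename_backbone_keys(sd)
    assert set(sd) == {"backbone.conv_encoder.model.conv1.weight", "input_projection.weight"}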
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
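# Editor's demo (`_demo_qkv_split` is a hypothetical helper): the fused attention
# projection is a (3*d, d) matrix; rows [0:d], [d:2d] and [-d:] are the q, k and v
# blocks that `read_in_q_k_v` slices out above (with d = 256 in the real checkpoint).
def _demo_qkv_split():
    d = 4
    w = torch.arange(3 * d * d, dtype=torch.float32).reshape(3 * d, d)
    q, k, v = w[:d, :], w[d : 2 * d, :], w[-d:, :]
    assert torch.equal(torch.cat([q, k, v]), w)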
def prepare_img():
    """Download a standard COCO validation image to verify the conversion on."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original model's weights to our Conditional DETR structure.
    """
    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)

    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    logger.info(f"Converting model {model_name}...")

    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                # re-nest the base model keys under the wrapped `.model` attribute
                val = state_dict.pop(key)
                state_dict["conditional_detr.model" + key[len("conditional_detr") :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)

    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='conditional_detr_resnet50',
type=str,
help='Name of the CONDITIONAL_DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
    args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
'''simple docstring'''
from __future__ import annotations
def all_unique(collection: list) -> bool:
    """Return True when every element of `collection` occurs exactly once."""
    return len(set(collection)) == len(collection)
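# Editor's demo (`_demo_all_unique` is a hypothetical helper): the set-vs-list
# length comparison above in action.
def _demo_all_unique():
    assert all_unique([1, 2, 3]) is True
    assert all_unique([1, 2, 2]) is False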
if __name__ == "__main__":
import doctest
doctest.testmod()
import jax.numpy as jnp

from ...utils import logging
from ..t5.modeling_flax_t5 import FlaxT5EncoderModel, FlaxT5ForConditionalGeneration, FlaxT5Model
from .configuration_mt5 import MT5Config

logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


def shift_tokens_right(input_ids, pad_token_id, decoder_start_token_id):
    """Shift input ids one token to the right, prepending the decoder start token."""
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)
    # replace possible -100 values (ignored label positions) with the pad token id
    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids


class FlaxMT5Model(FlaxT5Model):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5ForConditionalGeneration(FlaxT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5EncoderModel(FlaxT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config
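# Editor's demo (`_demo_shift_tokens_right` is a hypothetical helper):
# [5, -100, 6] shifts to [start, 5, -100], and the -100 marker is then
# replaced by the pad id.
def _demo_shift_tokens_right():
    ids = jnp.array([[5, -100, 6]])
    shifted = shift_tokens_right(ids, pad_token_id=0, decoder_start_token_id=1)
    assert shifted.tolist() == [[1, 5, 0]]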
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class AutoencoderKLOutput(BaseOutput):
    """Output of the AutoencoderKL encoding method."""

    latent_dist: "DiagonalGaussianDistribution"


class AutoencoderKL(ModelMixin, ConfigMixin):
    _supports_gradient_checkpointing = True
@register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 4,
        norm_num_groups: int = 32,
        sample_size: int = 32,
        scaling_factor: float = 0.18215,
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=True,
        )

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            norm_num_groups=norm_num_groups,
            act_fn=act_fn,
        )

        self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
        self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)
        self.use_slicing = False
        self.use_tiling = False

        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size, (list, tuple))
            else self.config.sample_size
        )
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
        self.tile_overlap_factor = 0.25
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__=False ):
"""simple docstring"""
if isinstance(lowerCAmelCase__ , (Encoder, Decoder) ):
SCREAMING_SNAKE_CASE_ : List[Any] = value
def UpperCamelCase__ ( self , lowerCAmelCase__ = True ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = use_tiling
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.enable_tiling(lowerCAmelCase__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = True
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = {}
def fn_recursive_add_processors(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
if hasattr(lowerCAmelCase__ , 'set_processor' ):
SCREAMING_SNAKE_CASE_ : Tuple = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F'''{name}.{sub_name}''' , lowerCAmelCase__ , lowerCAmelCase__ )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
return processors
def UpperCamelCase__ ( self , lowerCAmelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = len(self.attn_processors.keys() )
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and len(lowerCAmelCase__ ) != count:
raise ValueError(
F'''A dict of processors was passed, but the number of processors {len(lowerCAmelCase__ )} does not match the'''
F''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )
def fn_recursive_attn_processor(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
if hasattr(lowerCAmelCase__ , 'set_processor' ):
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
module.set_processor(lowerCAmelCase__ )
else:
module.set_processor(processor.pop(F'''{name}.processor''' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F'''{name}.{sub_name}''' , lowerCAmelCase__ , lowerCAmelCase__ )
for name, module in self.named_children():
fn_recursive_attn_processor(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = True ):
"""simple docstring"""
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(lowerCAmelCase__ , return_dict=lowerCAmelCase__ )
if self.use_slicing and x.shape[0] > 1:
SCREAMING_SNAKE_CASE_ : Optional[Any] = [self.encoder(lowerCAmelCase__ ) for x_slice in x.split(1 )]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.cat(lowerCAmelCase__ )
else:
SCREAMING_SNAKE_CASE_ : Any = self.encoder(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Tuple = self.quant_conv(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = DiagonalGaussianDistribution(lowerCAmelCase__ )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=lowerCAmelCase__ )
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = True ):
"""simple docstring"""
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(lowerCAmelCase__ , return_dict=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = self.post_quant_conv(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : List[str] = self.decoder(lowerCAmelCase__ )
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCAmelCase__ )
@apply_forward_hook
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = True ):
"""simple docstring"""
if self.use_slicing and z.shape[0] > 1:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [self._decode(lowerCAmelCase__ ).sample for z_slice in z.split(1 )]
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.cat(lowerCAmelCase__ )
else:
SCREAMING_SNAKE_CASE_ : Dict = self._decode(lowerCAmelCase__ ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=lowerCAmelCase__ )
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = min(a.shape[2] , b.shape[2] , lowerCAmelCase__ )
for y in range(lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ : int = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = min(a.shape[3] , b.shape[3] , lowerCAmelCase__ )
for x in range(lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
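    # Editor's worked example for the horizontal blend above (comment only):
    # with a = [0, 0, 0, 0], b = [1, 1, 1, 1] and blend_extent = 2, column x of b
    # becomes a[-2 + x] * (1 - x/2) + b[x] * (x/2), so b -> [0.0, 0.5, 1.0, 1.0],
    # i.e. a linear crossfade from a's right edge into b's left edge.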
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = True ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
SCREAMING_SNAKE_CASE_ : Optional[Any] = int(self.tile_latent_min_size * self.tile_overlap_factor )
SCREAMING_SNAKE_CASE_ : Dict = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
SCREAMING_SNAKE_CASE_ : Dict = []
for i in range(0 , x.shape[2] , lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ : str = []
for j in range(0 , x.shape[3] , lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ : Any = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
SCREAMING_SNAKE_CASE_ : str = self.encoder(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Dict = self.quant_conv(lowerCAmelCase__ )
row.append(lowerCAmelCase__ )
rows.append(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = []
for i, row in enumerate(lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = []
for j, tile in enumerate(lowerCAmelCase__ ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
SCREAMING_SNAKE_CASE_ : Any = self.blend_v(rows[i - 1][j] , lowerCAmelCase__ , lowerCAmelCase__ )
if j > 0:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.blend_h(row[j - 1] , lowerCAmelCase__ , lowerCAmelCase__ )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(lowerCAmelCase__ , dim=3 ) )
SCREAMING_SNAKE_CASE_ : str = torch.cat(lowerCAmelCase__ , dim=2 )
SCREAMING_SNAKE_CASE_ : int = DiagonalGaussianDistribution(lowerCAmelCase__ )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=lowerCAmelCase__ )
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = True ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
SCREAMING_SNAKE_CASE_ : Optional[Any] = int(self.tile_sample_min_size * self.tile_overlap_factor )
SCREAMING_SNAKE_CASE_ : List[str] = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
SCREAMING_SNAKE_CASE_ : Optional[Any] = []
for i in range(0 , z.shape[2] , lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ : Dict = []
for j in range(0 , z.shape[3] , lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ : str = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
SCREAMING_SNAKE_CASE_ : Any = self.post_quant_conv(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : List[Any] = self.decoder(lowerCAmelCase__ )
row.append(lowerCAmelCase__ )
rows.append(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : str = []
for i, row in enumerate(lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = []
for j, tile in enumerate(lowerCAmelCase__ ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
SCREAMING_SNAKE_CASE_ : str = self.blend_v(rows[i - 1][j] , lowerCAmelCase__ , lowerCAmelCase__ )
if j > 0:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.blend_h(row[j - 1] , lowerCAmelCase__ , lowerCAmelCase__ )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(lowerCAmelCase__ , dim=3 ) )
SCREAMING_SNAKE_CASE_ : Dict = torch.cat(lowerCAmelCase__ , dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCAmelCase__ )
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = False , lowerCAmelCase__ = True , lowerCAmelCase__ = None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = sample
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.encode(lowerCAmelCase__ ).latent_dist
if sample_posterior:
SCREAMING_SNAKE_CASE_ : Dict = posterior.sample(generator=lowerCAmelCase__ )
else:
SCREAMING_SNAKE_CASE_ : Tuple = posterior.mode()
SCREAMING_SNAKE_CASE_ : str = self.decode(lowerCAmelCase__ ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCAmelCase__ )
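# Editor's sketch (hedged, assuming the class above matches diffusers'
# AutoencoderKL; shown as comments because it needs pretrained weights):
#   vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse")
#   latents = vae.encode(pixel_values).latent_dist.sample() * vae.config.scaling_factor
#   recon = vae.decode(latents / vae.config.scaling_factor).sample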
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DiTPipeline(DiffusionPipeline):
    def __init__(
        self,
        transformer: Transformer2DModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create a imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip().rstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."
                )
        return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self :List[str] , a :List[int] , a :float = 4.0 , a :Optional[Union[torch.Generator, List[torch.Generator]]] = None , a :int = 5_0 , a :Optional[str] = "pil" , a :bool = True , ) -> str:
__UpperCamelCase : Dict = len(_lowercase )
__UpperCamelCase : List[str] = self.transformer.config.sample_size
__UpperCamelCase : List[str] = self.transformer.config.in_channels
__UpperCamelCase : Union[str, Any] = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=_lowercase , device=self.device , dtype=self.transformer.dtype , )
__UpperCamelCase : List[Any] = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
__UpperCamelCase : Tuple = torch.tensor(_lowercase , device=self.device ).reshape(-1 )
__UpperCamelCase : Dict = torch.tensor([1_0_0_0] * batch_size , device=self.device )
__UpperCamelCase : Tuple = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(_lowercase )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
__UpperCamelCase : Dict = latent_model_input[: len(_lowercase ) // 2]
__UpperCamelCase : Optional[Any] = torch.cat([half, half] , dim=0 )
__UpperCamelCase : str = self.scheduler.scale_model_input(_lowercase , _lowercase )
__UpperCamelCase : Any = t
if not torch.is_tensor(_lowercase ):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
__UpperCamelCase : Optional[int] = latent_model_input.device.type == "mps"
if isinstance(_lowercase , _lowercase ):
__UpperCamelCase : List[Any] = torch.floataa if is_mps else torch.floataa
else:
__UpperCamelCase : Dict = torch.intaa if is_mps else torch.intaa
__UpperCamelCase : Tuple = torch.tensor([timesteps] , dtype=_lowercase , device=latent_model_input.device )
elif len(timesteps.shape ) == 0:
__UpperCamelCase : List[Any] = timesteps[None].to(latent_model_input.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
__UpperCamelCase : Tuple = timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
__UpperCamelCase : Optional[Any] = self.transformer(
_lowercase , timestep=_lowercase , class_labels=_lowercase ).sample
# perform guidance
if guidance_scale > 1:
__UpperCamelCase , __UpperCamelCase : str = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
__UpperCamelCase , __UpperCamelCase : Dict = torch.split(_lowercase , len(_lowercase ) // 2 , dim=0 )
__UpperCamelCase : int = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
__UpperCamelCase : List[str] = torch.cat([half_eps, half_eps] , dim=0 )
__UpperCamelCase : int = torch.cat([eps, rest] , dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
__UpperCamelCase , __UpperCamelCase : List[Any] = torch.split(_lowercase , _lowercase , dim=1 )
else:
__UpperCamelCase : List[Any] = noise_pred
# compute previous image: x_t -> x_t-1
__UpperCamelCase : Optional[Any] = self.scheduler.step(_lowercase , _lowercase , _lowercase ).prev_sample
if guidance_scale > 1:
__UpperCamelCase , __UpperCamelCase : List[str] = latent_model_input.chunk(2 , dim=0 )
else:
__UpperCamelCase : Any = latent_model_input
__UpperCamelCase : Any = 1 / self.vae.config.scaling_factor * latents
__UpperCamelCase : Tuple = self.vae.decode(_lowercase ).sample
__UpperCamelCase : str = (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__UpperCamelCase : int = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__UpperCamelCase : List[str] = self.numpy_to_pil(_lowercase )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=_lowercase )
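# Editor's sketch (hedged, assuming the pipeline above matches diffusers'
# DiTPipeline; shown as comments because it needs pretrained weights):
#   pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
#   class_ids = pipe.get_label_ids(["golden retriever"])
#   image = pipe(class_ids, guidance_scale=4.0).images[0]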
"""simple docstring"""
from __future__ import annotations
class BoyerMooreSearch:
    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Return the rightmost index of `char` in the pattern, or -1."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Return the text index of the rightmost mismatch for an alignment at `current_pos`, or -1 on a full match."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions
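# Editor's demo (`_demo_bad_character_shift` is a hypothetical helper): one
# bad-character shift, worked out. At alignment 1 ("AB" against text[1:3] = "BA")
# the rightmost mismatch is the "A" at text index 2; its last occurrence in the
# pattern is index 0, so the heuristic would shift by 2 - 0 = 2.
def _demo_bad_character_shift():
    bms = BoyerMooreSearch("ABAABA", "AB")
    assert bms.mismatch_in_text(1) == 2
    assert bms.match_in_pattern("A") == 0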
_lowercase : str = 'ABAABA'
_lowercase : Tuple = 'AB'
_lowercase : Dict = BoyerMooreSearch(text, pattern)
_lowercase : Any = bms.bad_character_heuristic()
if len(positions) == 0:
print('No match found')
else:
print('Pattern found in following positions: ')
print(positions)
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""",
"""RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""",
"""RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""",
"""RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""",
"""RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""",
}
class RwkvConfig(PretrainedConfig):
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50277,
        context_length=1024,
        hidden_size=4096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
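# Editor's sketch (assumption: the class above matches transformers' RwkvConfig);
# `_demo_rwkv_config` is a hypothetical helper, not in the original file.
def _demo_rwkv_config():
    config = RwkvConfig()
    assert config.max_position_embeddings == config.context_length  # via attribute_map
    assert config.attention_hidden_size == config.hidden_size  # defaulted from hidden_size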
'''simple docstring'''
from sklearn.metrics import f1_score
import datasets
lowerCamelCase = """
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
"""
lowerCamelCase = """
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{'f1': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results['f1'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results['f1'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")
>>> print(round(results['f1'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")
>>> print(round(results['f1'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")
>>> print(round(results['f1'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'f1': array([0.8, 0. , 0. ])}
"""
lowerCamelCase = """
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class F1(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('int32')),
'references': datasets.Sequence(datasets.Value('int32')),
}
if self.config_name == 'multilabel'
else {
'predictions': datasets.Value('int32'),
'references': datasets.Value('int32'),
}) , reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'] , )
def __lowerCamelCase ( self : int , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : int , _lowerCAmelCase : Tuple=None , _lowerCAmelCase : Optional[int]=1 , _lowerCAmelCase : List[Any]="binary" , _lowerCAmelCase : Tuple=None):
'''simple docstring'''
__lowercase =fa_score(
_lowerCAmelCase , _lowerCAmelCase , labels=_lowerCAmelCase , pos_label=_lowerCAmelCase , average=_lowerCAmelCase , sample_weight=_lowerCAmelCase)
return {"f1": float(_lowerCAmelCase) if score.size == 1 else score}
| 48
| 1
|
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    HubertConfig,
    HubertForCTC,
    HubertModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    # Walk the attribute path (e.g. "encoder.layers.0.attention.k_proj") down to the target module.
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "hubert." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key
                if key in name or (key.split("w2v_model.")[-1] == name.split(".")[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_hubert_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    """Copy/paste/tweak the fairseq model's weights into the transformers design."""
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path)
    else:
        config = HubertConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = HubertForCTC(config)
    else:
        hf_wav2vec = HubertModel(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowercase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
lowercase : Dict = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
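
# Hedged usage sketch (added; not in the original script): the conversion can be driven
# from Python as well as the CLI; all paths below are placeholders.
# convert_hubert_checkpoint(
#     "/path/to/hubert_fairseq.pt",
#     "/path/to/output_dir",
#     config_path=None,     # fall back to the default HubertConfig
#     dict_path="/path/to/fairseq/dict.ltr.txt",
#     is_finetuned=True,
# )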
| 3
|
'''simple docstring'''
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image, question=None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # Assume the input is already a dict (or an iterable of dicts) of the
            # form {"image": image, "question": question}.
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
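
# Hedged usage sketch (added): this pipeline is normally built through the high-level
# factory; the checkpoint is left to the task default and the image URL is a placeholder.
# from transformers import pipeline
# vqa = pipeline("visual-question-answering")
# print(vqa(image="http://images.cocodataset.org/val2017/000000039769.jpg",
#           question="How many cats are there?"))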
| 3
| 1
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowercase__ = logging.get_logger(__name__)
class __lowerCamelCase ( lowerCamelCase__ ):
'''simple docstring'''
a_ : List[Any] = ['pixel_values']
def __init__( self : Optional[int] , a_ : Dict = True , a_ : Tuple = None , a_ : int = PILImageResampling.BILINEAR , a_ : int = True , a_ : int = None , a_ : int = True , a_ : str = 1 / 2_55 , a_ : List[str] = True , a_ : Optional[int] = None , a_ : Any = None , **a_ : Tuple , ):
super().__init__(**a_ )
lowerCAmelCase_ : List[Any] = size if size is not None else {"shortest_edge": 2_56}
lowerCAmelCase_ : Dict = get_size_dict(a_ , default_to_square=a_ )
lowerCAmelCase_ : Optional[Any] = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
lowerCAmelCase_ : Optional[int] = get_size_dict(a_ )
lowerCAmelCase_ : Tuple = do_resize
lowerCAmelCase_ : int = size
lowerCAmelCase_ : Optional[int] = resample
lowerCAmelCase_ : Dict = do_center_crop
lowerCAmelCase_ : List[Any] = crop_size
lowerCAmelCase_ : str = do_rescale
lowerCAmelCase_ : int = rescale_factor
lowerCAmelCase_ : Optional[Any] = do_normalize
lowerCAmelCase_ : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCAmelCase_ : Tuple = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCamelCase ( self : Tuple , a_ : List[Any] , a_ : Any , a_ : Tuple = PILImageResampling.BICUBIC , a_ : List[str] = None , **a_ : List[Any] , ):
lowerCAmelCase_ : Dict = get_size_dict(a_ , default_to_square=a_ )
if "shortest_edge" not in size:
raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
lowerCAmelCase_ : Union[str, Any] = get_resize_output_image_size(a_ , size=size["shortest_edge"] , default_to_square=a_ )
return resize(a_ , size=a_ , resample=a_ , data_format=a_ , **a_ )
def lowerCamelCase ( self : int , a_ : Optional[int] , a_ : List[str] , a_ : Any = None , **a_ : Optional[Any] , ):
lowerCAmelCase_ : Union[str, Any] = get_size_dict(a_ )
return center_crop(a_ , size=(size["height"], size["width"]) , data_format=a_ , **a_ )
def lowerCamelCase ( self : Tuple , a_ : Optional[Any] , a_ : List[Any] , a_ : str = None , **a_ : str ):
return rescale(a_ , scale=a_ , data_format=a_ , **a_ )
def lowerCamelCase ( self : str , a_ : Any , a_ : List[Any] , a_ : List[str] , a_ : Dict = None , **a_ : List[Any] , ):
return normalize(a_ , mean=a_ , std=a_ , data_format=a_ , **a_ )
def lowerCamelCase ( self : Any , a_ : List[Any] , a_ : int = None , a_ : List[Any] = None , a_ : Tuple = None , a_ : Any = None , a_ : List[Any] = None , a_ : Any = None , a_ : Optional[int] = None , a_ : Union[str, Any] = None , a_ : List[Any] = None , a_ : List[str] = None , a_ : Optional[Any] = None , a_ : Optional[int] = ChannelDimension.FIRST , **a_ : int , ):
lowerCAmelCase_ : Dict = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase_ : Any = size if size is not None else self.size
lowerCAmelCase_ : Dict = get_size_dict(a_ , default_to_square=a_ )
lowerCAmelCase_ : List[Any] = resample if resample is not None else self.resample
lowerCAmelCase_ : int = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCAmelCase_ : Optional[int] = crop_size if crop_size is not None else self.crop_size
lowerCAmelCase_ : int = get_size_dict(a_ )
lowerCAmelCase_ : Dict = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase_ : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase_ : Any = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase_ : Dict = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase_ : str = image_std if image_std is not None else self.image_std
lowerCAmelCase_ : int = make_list_of_images(a_ )
if not valid_images(a_ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
lowerCAmelCase_ : Optional[int] = [to_numpy_array(a_ ) for image in images]
if do_resize:
lowerCAmelCase_ : Optional[Any] = [self.resize(image=a_ , size=a_ , resample=a_ ) for image in images]
if do_center_crop:
lowerCAmelCase_ : Tuple = [self.center_crop(image=a_ , size=a_ ) for image in images]
if do_rescale:
lowerCAmelCase_ : List[str] = [self.rescale(image=a_ , scale=a_ ) for image in images]
if do_normalize:
lowerCAmelCase_ : Optional[int] = [self.normalize(image=a_ , mean=a_ , std=a_ ) for image in images]
lowerCAmelCase_ : str = [to_channel_dimension_format(a_ , a_ ) for image in images]
lowerCAmelCase_ : Any = {"pixel_values": images}
return BatchFeature(data=a_ , tensor_type=a_ )
| 356
|
"""simple docstring"""
def perfect(number: int) -> bool:
    """Check whether a number equals the sum of its proper divisors."""
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number


if __name__ == "__main__":
    print("Program to check whether a number is a Perfect number or not...")
    number = int(input("Enter number: ").strip())
    print(f"{number} is {'' if perfect(number) else 'not '}a Perfect Number.")
| 161
| 0
|
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
logger = logging.get_logger(__name__)


class GLPNFeatureExtractor(GLPNImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 159
|
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileViTImageProcessor,
    MobileViTV2Config,
    MobileViTV2ForImageClassification,
    MobileViTV2ForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE :Optional[Any] = logging.get_logger(__name__)
def _lowerCAmelCase ( lowerCAmelCase_ :List[Any] )->int:
'''simple docstring'''
print("Loading config file..." )
def flatten_yaml_as_dict(lowerCAmelCase_ :List[Any] , lowerCAmelCase_ :Optional[int]="" , lowerCAmelCase_ :int="." ):
snake_case_ = []
for k, v in d.items():
snake_case_ = parent_key + sep + k if parent_key else k
if isinstance(lowerCAmelCase_ , collections.abc.MutableMapping ):
items.extend(flatten_yaml_as_dict(lowerCAmelCase_ , lowerCAmelCase_ , sep=lowerCAmelCase_ ).items() )
else:
items.append((new_key, v) )
return dict(lowerCAmelCase_ )
snake_case_ = argparse.Namespace()
with open(lowerCAmelCase_ , "r" ) as yaml_file:
try:
snake_case_ = yaml.load(lowerCAmelCase_ , Loader=yaml.FullLoader )
snake_case_ = flatten_yaml_as_dict(lowerCAmelCase_ )
for k, v in flat_cfg.items():
setattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
except yaml.YAMLError as exc:
logger.error("Error while loading config file: {}. Error message: {}".format(lowerCAmelCase_ , str(lowerCAmelCase_ ) ) )
return config
def _lowerCAmelCase ( lowerCAmelCase_ :List[str] , lowerCAmelCase_ :Tuple )->Union[str, Any]:
'''simple docstring'''
    snake_case_ = MobileViTV2Config()
snake_case_ = False
# dataset
if task_name.startswith("imagenet1k_" ):
snake_case_ = 1_000
if int(task_name.strip().split("_" )[-1] ) == 384:
snake_case_ = 384
else:
snake_case_ = 256
snake_case_ = "imagenet-1k-id2label.json"
elif task_name.startswith("imagenet21k_to_1k_" ):
snake_case_ = 21_000
if int(task_name.strip().split("_" )[-1] ) == 384:
snake_case_ = 384
else:
snake_case_ = 256
snake_case_ = "imagenet-22k-id2label.json"
elif task_name.startswith("ade20k_" ):
snake_case_ = 151
snake_case_ = 512
snake_case_ = "ade20k-id2label.json"
snake_case_ = True
elif task_name.startswith("voc_" ):
snake_case_ = 21
snake_case_ = 512
snake_case_ = "pascal-voc-id2label.json"
snake_case_ = True
# orig_config
snake_case_ = load_orig_config_file(lowerCAmelCase_ )
assert getattr(lowerCAmelCase_ , "model.classification.name" , -1 ) == "mobilevit_v2", "Invalid model"
snake_case_ = getattr(lowerCAmelCase_ , "model.classification.mitv2.width_multiplier" , 1.0 )
assert (
getattr(lowerCAmelCase_ , "model.classification.mitv2.attn_norm_layer" , -1 ) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
snake_case_ = getattr(lowerCAmelCase_ , "model.classification.activation.name" , "swish" )
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
snake_case_ = getattr(lowerCAmelCase_ , "model.segmentation.output_stride" , 16 )
if "_deeplabv3" in task_name:
snake_case_ = getattr(lowerCAmelCase_ , "model.segmentation.deeplabv3.aspp_rates" , [12, 24, 36] )
snake_case_ = getattr(lowerCAmelCase_ , "model.segmentation.deeplabv3.aspp_out_channels" , 512 )
snake_case_ = getattr(lowerCAmelCase_ , "model.segmentation.deeplabv3.aspp_dropout" , 0.1 )
# id2label
snake_case_ = "huggingface/label-files"
snake_case_ = json.load(open(hf_hub_download(lowerCAmelCase_ , lowerCAmelCase_ , repo_type="dataset" ) , "r" ) )
snake_case_ = {int(lowerCAmelCase_ ): v for k, v in idalabel.items()}
snake_case_ = idalabel
snake_case_ = {v: k for k, v in idalabel.items()}
return config
def _lowerCAmelCase ( lowerCAmelCase_ :Any , lowerCAmelCase_ :Optional[Any] , lowerCAmelCase_ :Optional[Any] )->Optional[Any]:
'''simple docstring'''
snake_case_ = dct.pop(lowerCAmelCase_ )
snake_case_ = val
def _lowerCAmelCase ( lowerCAmelCase_ :Dict , lowerCAmelCase_ :int=False )->Dict:
'''simple docstring'''
if base_model:
snake_case_ = ""
else:
snake_case_ = "mobilevitv2."
snake_case_ = []
for k in state_dict.keys():
if k[:8] == "encoder.":
snake_case_ = k[8:]
else:
snake_case_ = k
if ".block." in k:
snake_case_ = k_new.replace(".block." , "." )
if ".conv." in k:
snake_case_ = k_new.replace(".conv." , ".convolution." )
if ".norm." in k:
snake_case_ = k_new.replace(".norm." , ".normalization." )
if "conv_1." in k:
snake_case_ = k_new.replace("conv_1." , F'''{model_prefix}conv_stem.''' )
for i in [1, 2]:
if F'''layer_{i}.''' in k:
snake_case_ = k_new.replace(F'''layer_{i}.''' , F'''{model_prefix}encoder.layer.{i-1}.layer.''' )
if ".exp_1x1." in k:
snake_case_ = k_new.replace(".exp_1x1." , ".expand_1x1." )
if ".red_1x1." in k:
snake_case_ = k_new.replace(".red_1x1." , ".reduce_1x1." )
for i in [3, 4, 5]:
if F'''layer_{i}.0.''' in k:
snake_case_ = k_new.replace(F'''layer_{i}.0.''' , F'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' )
if F'''layer_{i}.1.local_rep.0.''' in k:
snake_case_ = k_new.replace(F'''layer_{i}.1.local_rep.0.''' , F'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' )
if F'''layer_{i}.1.local_rep.1.''' in k:
snake_case_ = k_new.replace(F'''layer_{i}.1.local_rep.1.''' , F'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' )
for i in [3, 4, 5]:
if i == 3:
snake_case_ = [0, 1]
elif i == 4:
snake_case_ = [0, 1, 2, 3]
elif i == 5:
snake_case_ = [0, 1, 2]
for j in j_in:
if F'''layer_{i}.1.global_rep.{j}.''' in k:
snake_case_ = k_new.replace(
F'''layer_{i}.1.global_rep.{j}.''' , F'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' )
if F'''layer_{i}.1.global_rep.{j+1}.''' in k:
snake_case_ = k_new.replace(
F'''layer_{i}.1.global_rep.{j+1}.''' , F'''{model_prefix}encoder.layer.{i-1}.layernorm.''' )
if F'''layer_{i}.1.conv_proj.''' in k:
snake_case_ = k_new.replace(F'''layer_{i}.1.conv_proj.''' , F'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' )
if "pre_norm_attn.0." in k:
snake_case_ = k_new.replace("pre_norm_attn.0." , "layernorm_before." )
if "pre_norm_attn.1." in k:
snake_case_ = k_new.replace("pre_norm_attn.1." , "attention." )
if "pre_norm_ffn.0." in k:
snake_case_ = k_new.replace("pre_norm_ffn.0." , "layernorm_after." )
if "pre_norm_ffn.1." in k:
snake_case_ = k_new.replace("pre_norm_ffn.1." , "ffn.conv1." )
if "pre_norm_ffn.3." in k:
snake_case_ = k_new.replace("pre_norm_ffn.3." , "ffn.conv2." )
if "classifier.1." in k:
snake_case_ = k_new.replace("classifier.1." , "classifier." )
if "seg_head." in k:
snake_case_ = k_new.replace("seg_head." , "segmentation_head." )
if ".aspp_layer." in k:
snake_case_ = k_new.replace(".aspp_layer." , "." )
if ".aspp_pool." in k:
snake_case_ = k_new.replace(".aspp_pool." , "." )
rename_keys.append((k, k_new) )
return rename_keys
def _lowerCAmelCase ( lowerCAmelCase_ :Optional[Any] )->Optional[int]:
'''simple docstring'''
snake_case_ = []
for k in state_dict.keys():
if k.startswith("seg_head.aux_head." ):
keys_to_ignore.append(lowerCAmelCase_ )
for k in keys_to_ignore:
state_dict.pop(lowerCAmelCase_ , lowerCAmelCase_ )
def _lowerCAmelCase ( )->List[Any]:
'''simple docstring'''
snake_case_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
# url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
snake_case_ = Image.open(requests.get(lowerCAmelCase_ , stream=lowerCAmelCase_ ).raw )
return im
@torch.no_grad()
def _lowerCAmelCase ( lowerCAmelCase_ :Optional[int] , lowerCAmelCase_ :Dict , lowerCAmelCase_ :Optional[int] , lowerCAmelCase_ :Dict )->Dict:
'''simple docstring'''
snake_case_ = get_mobilevitva_config(lowerCAmelCase_ , lowerCAmelCase_ )
# load original state_dict
snake_case_ = torch.load(lowerCAmelCase_ , map_location="cpu" )
# load huggingface model
if task_name.startswith("ade20k_" ) or task_name.startswith("voc_" ):
        snake_case_ = MobileViTV2ForSemanticSegmentation(lowerCAmelCase_ ).eval()
        snake_case_ = False
    else:
        snake_case_ = MobileViTV2ForImageClassification(lowerCAmelCase_ ).eval()
snake_case_ = False
# remove and rename some keys of load the original model
snake_case_ = checkpoint
remove_unused_keys(lowerCAmelCase_ )
snake_case_ = create_rename_keys(lowerCAmelCase_ , base_model=lowerCAmelCase_ )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# load modified state_dict
model.load_state_dict(lowerCAmelCase_ )
# Check outputs on an image, prepared by MobileViTImageProcessor
snake_case_ = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
snake_case_ = image_processor(images=prepare_img() , return_tensors="pt" )
snake_case_ = model(**lowerCAmelCase_ )
# verify classification model
if task_name.startswith("imagenet" ):
snake_case_ = outputs.logits
snake_case_ = logits.argmax(-1 ).item()
print("Predicted class:" , model.config.idalabel[predicted_class_idx] )
if task_name.startswith("imagenet1k_256" ) and config.width_multiplier == 1.0:
# expected_logits for base variant
snake_case_ = torch.tensor([-1.63_36e00, -7.32_04e-02, -5.18_83e-01] )
assert torch.allclose(logits[0, :3] , lowerCAmelCase_ , atol=1e-4 )
Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ )
print(F'''Saving model {task_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowerCAmelCase_ )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(lowerCAmelCase_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE :Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--task''',
default='''imagenet1k_256''',
type=str,
help=(
        '''Name of the task for which the MobileViTV2 model you\'d like to convert is trained on. '''
'''
Classification (ImageNet-1k)
- MobileViTV2 (256x256) : imagenet1k_256
- MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
- MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
imagenet21k_to_1k_256
- MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
ImageNet-1k 384x384) : imagenet21k_to_1k_384
Segmentation
- ADE20K Dataset : ade20k_deeplabv3
- Pascal VOC 2012 Dataset: voc_deeplabv3
'''
),
choices=[
'''imagenet1k_256''',
'''imagenet1k_384''',
'''imagenet21k_to_1k_256''',
'''imagenet21k_to_1k_384''',
'''ade20k_deeplabv3''',
'''voc_deeplabv3''',
],
)
parser.add_argument(
'''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
SCREAMING_SNAKE_CASE :int = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
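
# Hedged usage sketch (added; not in the original file): a programmatic equivalent of
# the CLI block above. Arguments are passed positionally as in the __main__ call and
# all paths are placeholders.
# convert_mobilevitva_checkpoint(
#     "imagenet1k_256",
#     "/path/to/mobilevitv2-1.0.pt",
#     "/path/to/mobilevitv2.yaml",
#     "/path/to/output_dir",
# )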
| 159
| 1
|
'''simple docstring'''
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
a_ = logging.getLogger(__name__)
a_ = 'pytorch_model.bin'
@dataclasses.dataclass
class __SCREAMING_SNAKE_CASE :
snake_case_ = dataclasses.field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models."""} )
snake_case_ = dataclasses.field(
default=lowerCamelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co."""} , )
@dataclasses.dataclass
class __SCREAMING_SNAKE_CASE :
snake_case_ = dataclasses.field(metadata={"""help""": """A csv or a json file containing the training data."""} )
snake_case_ = dataclasses.field(metadata={"""help""": """A csv or a json file containing the data to predict on."""} )
snake_case_ = dataclasses.field(
default=lowerCamelCase , metadata={"""help""": """A csv or a json file containing the validation data."""} )
snake_case_ = dataclasses.field(
default=lowerCamelCase , metadata={"""help""": """The name of the task to train on."""} , )
snake_case_ = dataclasses.field(
default=lowerCamelCase , metadata={"""help""": """The list of labels for the task."""} )
@dataclasses.dataclass
class __SCREAMING_SNAKE_CASE :
snake_case_ = dataclasses.field(
metadata={"""help""": """The output directory where the model predictions and checkpoints will be written."""} )
snake_case_ = dataclasses.field(
default="""accuracy""" , metadata={"""help""": """The evaluation metric used for the task."""} )
snake_case_ = dataclasses.field(
default="""no""" , metadata={
"""help""": """The evaluation strategy to adopt during training. Possible values are: [\"no\", \"step\", \"epoch]"""
} , )
snake_case_ = dataclasses.field(
default=10 , metadata={"""help""": """Number of evaluation calls with no improvement after which training will be stopped."""} , )
snake_case_ = dataclasses.field(
default=0.0 , metadata={
"""help""": """How much the specified evaluation metric must improve to satisfy early stopping conditions."""
} , )
snake_case_ = dataclasses.field(
default=lowerCamelCase , metadata={"""help""": """Whether to filter the pseudo-labeled data based on the confidence score."""} , )
snake_case_ = dataclasses.field(
default=lowerCamelCase , metadata={"""help""": """Whether to filter the pseudo-labeled data based on the validation performance."""} , )
snake_case_ = dataclasses.field(
default=lowerCamelCase , metadata={"""help""": """Whether to fine-tune on labeled data after pseudo training."""} , )
snake_case_ = dataclasses.field(
default=0.0 , metadata={"""help""": """Confidence threshold for pseudo-labeled data filtering."""} , )
snake_case_ = dataclasses.field(
default=100 , metadata={"""help""": """Maximum number of self-training iterations."""} , )
snake_case_ = dataclasses.field(
default=lowerCamelCase , metadata={"""help""": """Random seed for initialization."""} , )
def _a( UpperCamelCase__ : Dict, UpperCamelCase__ : List[str], UpperCamelCase__ : int, UpperCamelCase__ : Optional[int], UpperCamelCase__ : Tuple, UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] =datasets.concatenate_datasets([infer_input, infer_output], axis=1 )
if args.do_filter_by_confidence:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =dataset.filter(lambda UpperCamelCase__ : example["probability"] > args.confidence_threshold )
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
SCREAMING_SNAKE_CASE__ : Union[str, Any] =int(eval_result * len(UpperCamelCase__ ) )
print(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Tuple =dataset.sort('''probability''', reverse=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : List[str] =dataset.select(range(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE__ : int =dataset.remove_columns(['''label''', '''probability'''] )
SCREAMING_SNAKE_CASE__ : List[Any] =dataset.rename_column('''prediction''', '''label''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] =dataset.map(lambda UpperCamelCase__ : {"label": idalabel[example["label"]]} )
SCREAMING_SNAKE_CASE__ : Optional[int] =dataset.shuffle(seed=args.seed )
SCREAMING_SNAKE_CASE__ : Dict =os.path.join(UpperCamelCase__, f"train_pseudo.{args.data_file_extension}" )
if args.data_file_extension == "csv":
dataset.to_csv(UpperCamelCase__, index=UpperCamelCase__ )
else:
dataset.to_json(UpperCamelCase__ )
def _a( UpperCamelCase__ : int, UpperCamelCase__ : Any, UpperCamelCase__ : Union[str, Any], UpperCamelCase__ : Dict, **UpperCamelCase__ : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int =Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO, )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
SCREAMING_SNAKE_CASE__ : Optional[int] =STModelArguments(model_name_or_path=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Tuple =STDataArguments(train_file=UpperCamelCase__, infer_file=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : str =STTrainingArguments(output_dir=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : int =argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(UpperCamelCase__ ).items():
setattr(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
for key, value in kwargs.items():
if hasattr(UpperCamelCase__, UpperCamelCase__ ):
setattr(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
# Sanity checks
SCREAMING_SNAKE_CASE__ : int ={}
SCREAMING_SNAKE_CASE__ : Union[str, Any] =None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
SCREAMING_SNAKE_CASE__ : Tuple =args.train_file
SCREAMING_SNAKE_CASE__ : List[Any] =args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
SCREAMING_SNAKE_CASE__ : Optional[int] =args.eval_file
for key in data_files:
SCREAMING_SNAKE_CASE__ : List[Any] =data_files[key].split('''.''' )[-1]
assert extension in ["csv", "json"], f"`{key}_file` should be a csv or a json file."
if args.data_file_extension is None:
SCREAMING_SNAKE_CASE__ : str =extension
else:
assert extension == args.data_file_extension, f"`{key}_file` should be a {args.data_file_extension} file`."
assert (
args.eval_metric in datasets.list_metrics()
), f"{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info('''Creating the initial data directory for self-training...''' )
SCREAMING_SNAKE_CASE__ : List[Any] =f"{args.output_dir}/self-train_iter-{{}}".format
SCREAMING_SNAKE_CASE__ : List[str] =data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir, exist_ok=UpperCamelCase__ )
os.makedirs(UpperCamelCase__, exist_ok=UpperCamelCase__ )
accelerator.wait_for_everyone()
SCREAMING_SNAKE_CASE__ : int =None
SCREAMING_SNAKE_CASE__ : Tuple =None
SCREAMING_SNAKE_CASE__ : str =0
SCREAMING_SNAKE_CASE__ : Any =False
# Show the progress bar
SCREAMING_SNAKE_CASE__ : int =tqdm(range(args.max_selftrain_iterations ), disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0, int(args.max_selftrain_iterations ) ):
SCREAMING_SNAKE_CASE__ : Optional[Any] =data_dir_format(UpperCamelCase__ )
assert os.path.exists(UpperCamelCase__ )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
SCREAMING_SNAKE_CASE__ : str =os.path.join(UpperCamelCase__, '''stage-1''' )
SCREAMING_SNAKE_CASE__ : Dict ={
'''accelerator''': accelerator,
'''model_name_or_path''': args.model_name_or_path,
'''cache_dir''': args.cache_dir,
'''do_train''': True,
'''train_file''': data_files['''train'''] if iteration == 0 else data_files['''train_pseudo'''],
'''do_eval''': True if args.eval_file is not None else False,
'''eval_file''': data_files['''eval'''],
'''do_predict''': True,
'''infer_file''': data_files['''infer'''],
'''task_name''': args.task_name,
'''label_list''': args.label_list,
'''output_dir''': current_output_dir,
'''eval_metric''': args.eval_metric,
'''evaluation_strategy''': args.evaluation_strategy,
'''early_stopping_patience''': args.early_stopping_patience,
'''early_stopping_threshold''': args.early_stopping_threshold,
'''seed''': args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(UpperCamelCase__, UpperCamelCase__ ):
arguments_dict.update({key: value} )
SCREAMING_SNAKE_CASE__ : List[str] =os.path.join(UpperCamelCase__, '''best-checkpoint''', UpperCamelCase__ )
if os.path.exists(UpperCamelCase__ ):
logger.info(
'''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.''', UpperCamelCase__, UpperCamelCase__, )
else:
logger.info('''***** Running self-training: iteration: %d, stage: 1 *****''', UpperCamelCase__ )
finetune(**UpperCamelCase__ )
accelerator.wait_for_everyone()
assert os.path.exists(UpperCamelCase__ )
logger.info('''Self-training job completed: iteration: %d, stage: 1.''', UpperCamelCase__ )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
SCREAMING_SNAKE_CASE__ : Dict =os.path.join(UpperCamelCase__, '''best-checkpoint''' )
SCREAMING_SNAKE_CASE__ : str =os.path.join(UpperCamelCase__, '''stage-2''' )
# Update arguments_dict
SCREAMING_SNAKE_CASE__ : int =model_path
SCREAMING_SNAKE_CASE__ : Any =data_files['''train''']
SCREAMING_SNAKE_CASE__ : List[Any] =current_output_dir
SCREAMING_SNAKE_CASE__ : int =os.path.join(UpperCamelCase__, '''best-checkpoint''', UpperCamelCase__ )
if os.path.exists(UpperCamelCase__ ):
logger.info(
'''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.''', UpperCamelCase__, UpperCamelCase__, )
else:
logger.info('''***** Running self-training: iteration: %d, stage: 2 *****''', UpperCamelCase__ )
finetune(**UpperCamelCase__ )
accelerator.wait_for_everyone()
assert os.path.exists(UpperCamelCase__ )
logger.info('''Self-training job completed: iteration: %d, stage: 2.''', UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Any =iteration
SCREAMING_SNAKE_CASE__ : List[str] =data_dir_format(iteration + 1 )
SCREAMING_SNAKE_CASE__ : List[str] =AutoConfig.from_pretrained(os.path.join(UpperCamelCase__, '''best-checkpoint''' ) )
SCREAMING_SNAKE_CASE__ : Any =config.idalabel
SCREAMING_SNAKE_CASE__ : int =os.path.join(UpperCamelCase__, '''eval_results_best-checkpoint.json''' )
SCREAMING_SNAKE_CASE__ : str =os.path.join(UpperCamelCase__, '''test_results_best-checkpoint.json''' )
assert os.path.exists(UpperCamelCase__ )
with open(UpperCamelCase__, '''r''' ) as f:
SCREAMING_SNAKE_CASE__ : int =float(json.load(UpperCamelCase__ )[args.eval_metric] )
SCREAMING_SNAKE_CASE__ : Any =os.path.join(UpperCamelCase__, '''infer_output_best-checkpoint.csv''' )
assert os.path.exists(UpperCamelCase__ )
# Loading the dataset from local csv or json files.
SCREAMING_SNAKE_CASE__ : Optional[Any] =load_dataset(args.data_file_extension, data_files={'''data''': data_files['''infer''']} )['''data''']
SCREAMING_SNAKE_CASE__ : Optional[int] =load_dataset('''csv''', data_files={'''data''': infer_output_file} )['''data''']
if accelerator.is_main_process:
os.makedirs(UpperCamelCase__, exist_ok=UpperCamelCase__ )
shutil.copy(UpperCamelCase__, os.path.join(UpperCamelCase__, f"eval_results_iter-{iteration}.json" ) )
if os.path.exists(UpperCamelCase__ ):
shutil.copy(UpperCamelCase__, os.path.join(UpperCamelCase__, f"test_results_iter-{iteration}.json" ) )
create_pseudo_labeled_data(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
accelerator.wait_for_everyone()
SCREAMING_SNAKE_CASE__ : Union[str, Any] =os.path.join(UpperCamelCase__, f"train_pseudo.{args.data_file_extension}" )
if args.evaluation_strategy != IntervalStrategy.NO.value:
SCREAMING_SNAKE_CASE__ : Dict =eval_result
if best_iteration is None:
SCREAMING_SNAKE_CASE__ : Dict =new_iteration
SCREAMING_SNAKE_CASE__ : int =new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
SCREAMING_SNAKE_CASE__ : Tuple =new_iteration
SCREAMING_SNAKE_CASE__ : Optional[int] =new_eval_result
SCREAMING_SNAKE_CASE__ : Optional[int] =0
else:
if new_eval_result == best_eval_result:
SCREAMING_SNAKE_CASE__ : List[str] =new_iteration
SCREAMING_SNAKE_CASE__ : str =new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
SCREAMING_SNAKE_CASE__ : int =True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info('''Best iteration: %d''', UpperCamelCase__ )
logger.info('''Best evaluation result: %s = %f''', args.eval_metric, UpperCamelCase__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(UpperCamelCase__, f"eval_results_iter-{iteration}.json" ), os.path.join(UpperCamelCase__, '''eval_results_best-iteration.json''' ), )
else:
# Assume that the last iteration is the best
logger.info('''Best iteration: %d''', args.max_selftrain_iterations - 1 )
logger.info('''Best evaluation result: %s = %f''', args.eval_metric, UpperCamelCase__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(UpperCamelCase__, f"eval_results_iter-{args.max_selftrain_iterations - 1}.json" ), os.path.join(UpperCamelCase__, '''eval_results_best-iteration.json''' ), )
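
# Hedged usage sketch (added; not in the original file). The driver above is invoked
# with (model_name_or_path, train_file, infer_file, output_dir, **kwargs); the name
# `selftrain` and the keyword names below are assumptions for illustration only.
# selftrain(
#     model_name_or_path="bert-base-uncased",
#     train_file="train.csv",
#     infer_file="unlabeled.csv",
#     output_dir="self_training_output",
#     evaluation_strategy="no",
# )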
| 222
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
    # See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class BioGptConfig(PretrainedConfig):
    model_type = "biogpt"

    def __init__(
        self,
        vocab_size=42384,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        scale_embedding=True,
        use_cache=True,
        layerdrop=0.0,
        activation_dropout=0.0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
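
# Added illustration: the defaults above mirror the published microsoft/biogpt
# configuration; any field can be overridden per keyword.
if __name__ == "__main__":
    config = BioGptConfig()
    small_config = BioGptConfig(num_hidden_layers=6, hidden_size=512)
    print(config.vocab_size, small_config.num_hidden_layers)  # 42384 6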
| 222
| 1
|
cache = {}


def _calculate(days: int, absent: int, late: int) -> int:
    # a string fails if we are late three days in a row or absent twice in total
    if late == 3 or absent == 2:
        return 0
    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1
    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]
    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today
    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)
    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)
    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)

    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings


def solution(days: int = 30) -> int:
    return _calculate(days, absent=0, late=0)
if __name__ == "__main__":
print(solution())
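
# Added check from the problem statement: over a 4-day period there are exactly
# 43 valid prize strings.
assert solution(4) == 43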
| 7
|
'''simple docstring'''
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job):
    """Extract start/end timestamps and the duration (in minutes) of a single job."""
    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)

    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info
def get_job_time(workflow_run_id, token=None):
    """Extract time info for all jobs in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(f"""{k}: {v['duration']}""")
| 162
| 0
|
'''simple docstring'''
import functools
def min_distance_up_bottom(word1: str, word2: str) -> int:
    """Top-down edit distance with memoization via functools.cache."""
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if first word index is overflow - delete all remaining chars of the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second word index is overflow - delete all remaining chars of the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
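
# Added worked example: the classic "intention" -> "execution" pair needs 5 edits.
assert min_distance_up_bottom("intention", "execution") == 5
assert min_distance_up_bottom("", "abc") == 3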
| 353
|
'''simple docstring'''
from __future__ import annotations
def shear_stress(
    stress: float,
    tangential_force: float,
    area: float,
) -> tuple[str, float]:
    """Solve for whichever of stress, tangential force, or area is passed as zero."""
    if (stress, tangential_force, area).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif stress < 0:
        raise ValueError("Stress cannot be negative")
    elif tangential_force < 0:
        raise ValueError("Tangential Force cannot be negative")
    elif area < 0:
        raise ValueError("Area cannot be negative")
    elif stress == 0:
        return (
            "stress",
            tangential_force / area,
        )
    elif tangential_force == 0:
        return (
            "tangential_force",
            stress * area,
        )
    else:
        return (
            "area",
            tangential_force / stress,
        )
if __name__ == "__main__":
import doctest
doctest.testmod()
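
# Added worked example: with stress = 25 Pa and a tangential force of 100 N, the
# implied area is 100 / 25 = 4 m^2.
assert shear_stress(stress=25, tangential_force=100, area=0) == ("area", 4.0)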
| 170
| 0
|
MOD_ADLER = 65521


def adler32(plain_text: str) -> int:
    """Compute the Adler-32 checksum of a string."""
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
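
# Added check against the well-known test vector: Adler-32("Wikipedia") = 0x11E60398.
assert adler32("Wikipedia") == 300286872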
| 48
|
def greatest_common_divisor(x: int, y: int) -> int:
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    """Smallest positive number evenly divisible by all of 1..n (Project Euler 5)."""
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g


if __name__ == "__main__":
    print(f"{solution() = }")
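
# Added check from the problem statement: the smallest number divisible by 1..10 is 2520.
assert solution(10) == 2520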
| 48
| 1
|
'''simple docstring'''
import pprint
import requests
API_ENDPOINT_URL = "https://zenquotes.io/api"


def quote_of_the_day() -> list:
    return requests.get(API_ENDPOINT_URL + "/today").json()


def random_quotes() -> list:
    return requests.get(API_ENDPOINT_URL + "/random").json()


if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
| 338
|
'''simple docstring'''
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main() -> None:
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)

    return "".join(translated)
if __name__ == "__main__":
main()
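
# Added round-trip sanity check: decrypting an encryption with the same key
# recovers the original message (case and punctuation pass through unchanged).
assert decrypt_message("LION", encrypt_message("LION", "Meet me at dawn!")) == "Meet me at dawn!"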
| 338
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json',
'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json',
}
class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
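
# Added illustration: `head_dim` is derived from the defaults above (4544 // 71 = 64)
# and rotary embeddings apply whenever `alibi` is disabled, which is the default.
if __name__ == "__main__":
    config = FalconConfig()
    print(config.head_dim)  # 64
    print(config.rotary)    # True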
| 3
|
'''simple docstring'''
from pathlib import Path

import cv2
import numpy as np
from matplotlib import pyplot as plt


def get_rotation(img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    """Rotate an image via the affine transform defined by two point triples."""
    matrix = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, matrix, (rows, cols))


if __name__ == "__main__":
    # read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape
    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)
    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]
    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
| 161
| 0
|
'''simple docstring'''
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_lowercase = """\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
"""
_lowercase = """\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""
_lowercase = """
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for 'record': list of question-answer dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'prediction_text': the predicted answer text
- for 'multirc': list of question-answer dictionaries with the following keys:
- 'idx': index of the question-answer pair as specified by the dataset
- 'prediction': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for 'record': list of question-answers dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'answers': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for 'record':
- 'exact_match': Exact match between answer and gold answer
- 'f1': F1 score
- for 'multirc':
- 'exact_match': Exact match between answer and gold answer
- 'f1_m': Per-question macro-F1 score
- 'f1_a': Average F1 score over all answers
- for 'axb':
'matthews_correlation': Matthew Correlation
- for 'cb':
- 'accuracy': Accuracy
- 'f1': F1 score
- for all others:
- 'accuracy': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'record')
>>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
>>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
>>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'matthews_correlation': 1.0}
"""
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def evaluate_multirc(ids_preds, labels):
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f'{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}'
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]
    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SuperGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" if not self.config_name == """record""" and not self.config_name == """multirc""" else None , )
    def _get_feature_types(self):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"prediction_text": datasets.Value("""string""" ),
},
"references": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"answers": datasets.Sequence(datasets.Value("""string""" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("""int64""" ),
"paragraph": datasets.Value("""int64""" ),
"question": datasets.Value("""int64""" ),
},
"prediction": datasets.Value("""int64""" ),
},
"references": datasets.Value("""int64""" ),
}
else:
return {
"predictions": datasets.Value("""int64""" ),
"references": datasets.Value("""int64""" ),
}
    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
| 363
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json",
    "YituTech/conv-bert-medium-small": (
        "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"
    ),
    "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json",
    # See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class ConvBertConfig(PretrainedConfig):
    model_type = "convbert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, embedding_size=768, head_ratio=2, conv_kernel_size=9, num_groups=1, classifier_dropout=None, **kwargs):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout


class ConvBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
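

# Usage sketch (assumes `transformers` is installed; the `inputs` property above
# supplies the dynamic axes consumed during ONNX export):
if __name__ == "__main__":
    onnx_config = ConvBertOnnxConfig(ConvBertConfig())
    print(onnx_config.inputs)  # OrderedDict over input_ids / attention_mask / token_type_ids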
| 229
| 0
|
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count


outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print(
    "No of Comparisons for 100 elements selected from a standard normal distribution "
    "is :"
)
print(z)
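
# Deterministic mini-example (added sketch): the comparison count varies with the
# random pivots, but the array always comes back sorted in place.
small = np.array([3.0, 1.0, 2.0])
comparisons = _in_place_quick_sort(small, 0, len(small) - 1)
assert list(small) == [1.0, 2.0, 3.0]
print("No of Comparisons for a 3 element array is :", comparisons)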
| 222
|
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
default_cache_path = HUGGINGFACE_HUB_CACHE

CONFIG_NAME = "config.json"
WEIGHTS_NAME = "diffusion_pytorch_model.bin"
FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
ONNX_WEIGHTS_NAME = "model.onnx"
SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors"
ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = "https://huggingface.co"
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"]
TEXT_ENCODER_ATTN_MODULE = ".self_attn"
| 222
| 1
|
'''simple docstring'''
from __future__ import annotations
def encode(plain: str) -> list[int]:
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))


if __name__ == "__main__":
    main()
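

# Round-trip self-check (added sketch): 'a'..'z' map to 1..26 because ord("a") == 97.
assert encode("abc") == [1, 2, 3]
assert decode([8, 5, 12, 12, 15]) == "hello"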
| 46
|
'''simple docstring'''
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class BasicTransformerBlock(nn.Module):
    def __init__(self, dim, num_attention_heads, attention_head_dim, dropout=0.0, cross_attention_dim=None, activation_fn="geglu", num_embeds_ada_norm=None, attention_bias=False, only_cross_attention=False, double_self_attention=False, upcast_attention=False, norm_elementwise_affine=True, norm_type="layer_norm", final_dropout=False):
        super().__init__()
        self.only_cross_attention = only_cross_attention

        self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
        self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"

        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
                f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}."
            )

        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        if self.use_ada_layer_norm:
            self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
        elif self.use_ada_layer_norm_zero:
            self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm)
        else:
            self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.attn1 = Attention(
            query_dim=dim, heads=num_attention_heads, dim_head=attention_head_dim, dropout=dropout,
            bias=attention_bias, cross_attention_dim=cross_attention_dim if only_cross_attention else None,
            upcast_attention=upcast_attention,
        )

        # 2. Cross-Attn
        if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
            # the second cross attention block.
            self.norm2 = (
                AdaLayerNorm(dim, num_embeds_ada_norm)
                if self.use_ada_layer_norm
                else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
            )
            self.attn2 = Attention(
                query_dim=dim, cross_attention_dim=cross_attention_dim if not double_self_attention else None,
                heads=num_attention_heads, dim_head=attention_head_dim, dropout=dropout,
                bias=attention_bias, upcast_attention=upcast_attention,
            )  # is self-attn if encoder_hidden_states is none
        else:
            self.norm2 = None
            self.attn2 = None

        # 3. Feed-forward
        self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout)

        # let chunk size default to None
        self._chunk_size = None
        self._chunk_dim = 0

    def set_chunk_feed_forward(self, chunk_size, dim):
        self._chunk_size = chunk_size
        self._chunk_dim = dim

    def forward(self, hidden_states, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, timestep=None, cross_attention_kwargs=None, class_labels=None):
        # 1. Self-Attention
        if self.use_ada_layer_norm:
            norm_hidden_states = self.norm1(hidden_states, timestep)
        elif self.use_ada_layer_norm_zero:
            norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
                hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
            )
        else:
            norm_hidden_states = self.norm1(hidden_states)

        cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}
        attn_output = self.attn1(
            norm_hidden_states,
            encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
            attention_mask=attention_mask,
            **cross_attention_kwargs,
        )
        if self.use_ada_layer_norm_zero:
            attn_output = gate_msa.unsqueeze(1) * attn_output
        hidden_states = attn_output + hidden_states

        # 2. Cross-Attention
        if self.attn2 is not None:
            norm_hidden_states = (
                self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
            )
            attn_output = self.attn2(
                norm_hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                **cross_attention_kwargs,
            )
            hidden_states = attn_output + hidden_states

        # 3. Feed-forward
        norm_hidden_states = self.norm3(hidden_states)

        if self.use_ada_layer_norm_zero:
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]

        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
                raise ValueError(
                    f"`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`."
                )

            num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
            ff_output = torch.cat(
                [self.ff(hid_slice) for hid_slice in norm_hidden_states.chunk(num_chunks, dim=self._chunk_dim)],
                dim=self._chunk_dim,
            )
        else:
            ff_output = self.ff(norm_hidden_states)

        if self.use_ada_layer_norm_zero:
            ff_output = gate_mlp.unsqueeze(1) * ff_output

        hidden_states = ff_output + hidden_states

        return hidden_states
class FeedForward(nn.Module):
    def __init__(self, dim, dim_out=None, mult=4, dropout=0.0, activation_fn="geglu", final_dropout=False):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = dim_out if dim_out is not None else dim

        if activation_fn == "gelu":
            act_fn = GELU(dim, inner_dim)
        if activation_fn == "gelu-approximate":
            act_fn = GELU(dim, inner_dim, approximate="tanh")
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim, inner_dim)
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim, inner_dim)

        self.net = nn.ModuleList([])
        # project in
        self.net.append(act_fn)
        # project dropout
        self.net.append(nn.Dropout(dropout))
        # project out
        self.net.append(nn.Linear(inner_dim, dim_out))
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout))

    def forward(self, hidden_states):
        for module in self.net:
            hidden_states = module(hidden_states)
        return hidden_states
class GELU(nn.Module):
    def __init__(self, dim_in, dim_out, approximate="none"):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)
        self.approximate = approximate

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate, approximate=self.approximate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states = self.proj(hidden_states)
        hidden_states = self.gelu(hidden_states)
        return hidden_states
class GEGLU(nn.Module):
    def __init__(self, dim_in, dim_out):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
        return hidden_states * self.gelu(gate)
class ApproximateGELU(nn.Module):
    def __init__(self, dim_in, dim_out):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)

    def forward(self, x):
        x = self.proj(x)
        # sigmoid-based GELU approximation: x * sigmoid(1.702 * x)
        return x * torch.sigmoid(1.702 * x)
class AdaLayerNorm(nn.Module):
    def __init__(self, embedding_dim, num_embeddings):
        super().__init__()
        self.emb = nn.Embedding(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, embedding_dim * 2)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False)

    def forward(self, x, timestep):
        emb = self.linear(self.silu(self.emb(timestep)))
        scale, shift = torch.chunk(emb, 2)
        x = self.norm(x) * (1 + scale) + shift
        return x
class AdaLayerNormZero(nn.Module):
    def __init__(self, embedding_dim, num_embeddings):
        super().__init__()
        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)

    def forward(self, x, timestep, class_labels, hidden_dtype=None):
        emb = self.linear(self.silu(self.emb(timestep, class_labels, hidden_dtype=hidden_dtype)))
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class AdaGroupNorm(nn.Module):
    def __init__(self, embedding_dim, out_dim, num_groups, act_fn=None, eps=1e-5):
        super().__init__()
        self.num_groups = num_groups
        self.eps = eps

        if act_fn is None:
            self.act = None
        else:
            self.act = get_activation(act_fn)

        self.linear = nn.Linear(embedding_dim, out_dim * 2)

    def forward(self, x, emb):
        if self.act:
            emb = self.act(emb)
        emb = self.linear(emb)
        emb = emb[:, :, None, None]
        scale, shift = emb.chunk(2, dim=1)

        x = F.group_norm(x, self.num_groups, eps=self.eps)
        x = x * (1 + scale) + shift
        return x
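

# Usage sketch (added): in a standalone script one would import the block as
# `from diffusers.models.attention import BasicTransformerBlock`; shapes are illustrative.
if __name__ == "__main__":
    block = BasicTransformerBlock(dim=32, num_attention_heads=4, attention_head_dim=8)
    sample = torch.randn(2, 16, 32)  # (batch, sequence_length, dim)
    print(block(sample).shape)  # torch.Size([2, 16, 32])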
| 46
| 1
|
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix="eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size))
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics
        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())
        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix="test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size))
        )
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
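

# Sketch of the post_process_function contract assumed above (a hypothetical toy,
# not part of the trainer): it receives the raw examples, the tokenized dataset and
# the raw predictions, and returns whatever `compute_metrics` expects downstream.
def toy_post_process_function(examples, features, predictions, stage="eval"):
    start_logits, end_logits = predictions
    # a real implementation (e.g. utils_qa.postprocess_qa_predictions) maps the
    # logits back to answer spans; here we only illustrate the call signature
    return {"predictions": [], "label_ids": []}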
| 21
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swinv2-tiny-patch4-window8-256": (
        "https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
    ),
}
class Swinv2Config(PretrainedConfig):
    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-5, encoder_stride=32, **kwargs):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
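

# Channel-dimension sketch: with the defaults above the embedding dim doubles at each
# of the three stage transitions, so hidden_size == 96 * 2 ** 3 == 768.
if __name__ == "__main__":
    embed_dim, depths = 96, [2, 2, 6, 2]
    print(int(embed_dim * 2 ** (len(depths) - 1)))  # 768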
| 170
| 0
|
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    return train_dataloader, eval_dataloader


def evaluation_loop(accelerator, model, eval_dataloader, metric):
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"])
        )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions,
            references=references,
        )
    eval_metric = metric.compute()
    return eval_metric["accuracy"]


def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs

    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch

    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizers's lr:", optimizer.param_groups[0]["lr"])
        with open(os.path.join(args.output_dir, f"state_{starting_epoch-1}.json"), "r") as f:
            resumed_state = json.load(f)
            assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
            assert (
                resumed_state["lr"] == lr_scheduler.get_lr()[0]
            ), "Scheduler learning rate mismatch, loading from checkpoint failed"
            assert (
                resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
            ), "Optimizer learning rate mismatch, loading from checkpoint failed"
            assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return

    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            overall_step += 1
        output_dir = os.path.join(args.output_dir, f"epoch_{epoch}")
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["overall_step"] = overall_step
        accelerator.print(f"epoch {epoch}:", state)

        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f:
                json.dump(state, f)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--partial_train_epoch",
        type=int,
        default=None,
        help="If passed, the training will stop after this number of epochs.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=2,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
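
# Example invocation (hypothetical paths/flags; assumes an `accelerate config` with
# DeepSpeed enabled, since DummyOptim/DummyScheduler are only used in that case):
#
#   accelerate launch this_script.py --model_name_or_path bert-base-cased \
#       --output_dir ./checkpoints --num_epochs 2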
| 353
|
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()
if __name__ == "__main__":
main()
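
# Example invocation (a sketch; the flag names follow the shared BenchmarkArguments):
#   python run_benchmark_tf.py --models bert-base-uncased --batch_sizes 8 --sequence_lengths 128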
| 201
| 0
|
import pprint
import requests
API_ENDPOINT_URL = "https://zenquotes.io/api"


def quote_of_the_day() -> list:
    return requests.get(API_ENDPOINT_URL + "/today").json()


def random_quotes() -> list:
    return requests.get(API_ENDPOINT_URL + "/random").json()


if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
| 338
|
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
top_k (`int`, defaults to 5):
The number of predictions to return.
targets (`str` or `List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
token will be used (with a warning, and that might be slower).
""" , )
class FillMaskPipeline(Pipeline):
    def get_masked_index(self, input_ids: GenericTensor) -> np.ndarray:
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index

    def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor):
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask",
                self.model.base_model_prefix,
                f"No mask_token ({self.tokenizer.mask_token}) found on the input",
            )

    def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor):
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)

    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]:
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs

    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]

        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]

            outputs = outputs.numpy()

            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)

            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]
            values, predictions = probs.topk(top_k)

        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()

                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result

    def get_target_ids(self, targets, top_k=None):
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target,
                    add_special_tokens=False,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    max_length=1,
                    truncation=True,
                )["input_ids"]
                if len(input_ids) == 0:
                    logger.warning(
                        f"The specified target token `{target}` does not exist in the model vocabulary. "
                        "We cannot replace it with anything meaningful, ignoring it"
                    )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"The specified target token `{target}` does not exist in the model vocabulary. "
                    f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`."
                )
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError("At least one target must be provided when passed.")
        target_ids = np.array(target_ids)
        return target_ids

    def _sanitize_parameters(self, top_k=None, targets=None):
        postprocess_params = {}

        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params["target_ids"] = target_ids

        if top_k is not None:
            postprocess_params["top_k"] = top_k

        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`."
            )
        return {}, {}, postprocess_params

    def __call__(self, inputs, *args, **kwargs):
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(inputs, list) and len(inputs) == 1:
            return outputs[0]
        return outputs
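

# Usage sketch (downloads weights on first run; the pipeline is normally reached via
# the `pipeline("fill-mask")` factory rather than by instantiating the class directly):
if __name__ == "__main__":
    from transformers import pipeline

    unmasker = pipeline("fill-mask", model="distilroberta-base")
    print(unmasker("The capital of France is <mask>.", top_k=2))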
| 338
| 1
|
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class MCTCTProcessor(ProcessorMixin):
    feature_extractor_class = "MCTCTFeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.')
            audio = kwargs.pop('raw_speech')
        else:
            audio = kwargs.pop('audio', None)
        sampling_rate = kwargs.pop('sampling_rate', None)
        text = kwargs.pop('text', None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError('You need to specify either an `audio` or `text` input to process.')

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs['labels'] = encodings['input_ids']
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop('input_features', None)
        labels = kwargs.pop('labels', None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features['labels'] = labels['input_ids']
            return input_features

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            '`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
            'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
            'your audio inputs, or in a separate call.' )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
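

# Usage sketch (assumes the public M-CTC-T checkpoint name; audio is routed to the
# feature extractor, text to the tokenizer):
if __name__ == "__main__":
    import numpy as np

    processor = MCTCTProcessor.from_pretrained("speechbrain/m-ctc-t-large")
    inputs = processor(audio=np.zeros(16000, dtype=np.float32), sampling_rate=16000, return_tensors="pt")
    print(inputs.keys())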
| 361
|
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def lowerCamelCase_ ( UpperCamelCase__ : List[Any] ) -> List[Any]:
"""simple docstring"""
if "emb" in name:
__lowerCamelCase = name.replace('emb' , 'model.decoder.embed_tokens' )
if "transformer" in name:
__lowerCamelCase = name.replace('transformer' , 'model.decoder' )
if "cross_attention" in name:
__lowerCamelCase = name.replace('cross_attention' , 'encoder_attn' )
if "linear1" in name:
__lowerCamelCase = name.replace('linear1' , 'fc1' )
if "linear2" in name:
__lowerCamelCase = name.replace('linear2' , 'fc2' )
if "norm1" in name:
__lowerCamelCase = name.replace('norm1' , 'self_attn_layer_norm' )
if "norm_cross" in name:
__lowerCamelCase = name.replace('norm_cross' , 'encoder_attn_layer_norm' )
if "norm2" in name:
__lowerCamelCase = name.replace('norm2' , 'final_layer_norm' )
if "out_norm" in name:
__lowerCamelCase = name.replace('out_norm' , 'model.decoder.layer_norm' )
if "linears" in name:
__lowerCamelCase = name.replace('linears' , 'lm_heads' )
if "condition_provider.conditioners.description.output_proj" in name:
__lowerCamelCase = name.replace('condition_provider.conditioners.description.output_proj' , 'enc_to_dec_proj' )
return name
def lowerCamelCase_ ( UpperCamelCase__ : OrderedDict , UpperCamelCase__ : int ) -> Tuple[Dict, Dict]:
"""simple docstring"""
__lowerCamelCase = list(state_dict.keys() )
__lowerCamelCase = {}
for key in keys:
__lowerCamelCase = state_dict.pop(UpperCamelCase__ )
__lowerCamelCase = rename_keys(UpperCamelCase__ )
if "in_proj_weight" in key:
# split fused qkv proj
__lowerCamelCase = val[:hidden_size, :]
__lowerCamelCase = val[hidden_size : 2 * hidden_size, :]
__lowerCamelCase = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
__lowerCamelCase = val
else:
__lowerCamelCase = val
return state_dict, enc_dec_proj_state_dict
def lowerCamelCase_ ( UpperCamelCase__ : str ) -> MusicgenDecoderConfig:
"""simple docstring"""
if checkpoint == "small":
# default config values
__lowerCamelCase = 1024
__lowerCamelCase = 24
__lowerCamelCase = 16
elif checkpoint == "medium":
__lowerCamelCase = 1536
__lowerCamelCase = 48
__lowerCamelCase = 24
elif checkpoint == "large":
__lowerCamelCase = 2048
__lowerCamelCase = 48
__lowerCamelCase = 32
else:
raise ValueError(F"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""" )
__lowerCamelCase = MusicgenDecoderConfig(
hidden_size=UpperCamelCase__ , ffn_dim=hidden_size * 4 , num_hidden_layers=UpperCamelCase__ , num_attention_heads=UpperCamelCase__ , )
return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    """Convert a fairseq MusicGen checkpoint into a composite HF MusicGen model."""
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = TaEncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")
    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
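# Example invocation mirroring the CLI below (hypothetical output folder):
#
#   convert_musicgen_checkpoint("small", pytorch_dump_folder="./musicgen-small", device="cpu")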
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
    args = parser.parse_args()
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub, args.device)
'''simple docstring'''
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]
class BankersAlgorithm:
    def __init__(
        self,
        claim_vector: list[int],
        allocated_resources_table: list[list[int]],
        maximum_claim_table: list[list[int]],
    ) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        """Sum the currently allocated resources, per resource type."""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        """Available resources: claim vector minus what is already allocated."""
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation()
        )

    def __need(self) -> list[list[int]]:
        """Remaining need per process: maximum claim minus allocated resources."""
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        """Map each process index to its need list."""
        return {self.__need().index(i): i for i in self.__need()}

    def main(self, **kwargs) -> None:
        """Simulate the Banker's algorithm safety check."""
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break

    def __pretty_data(self):
        """Properly align the display of the data tables."""
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector)
        )
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources())
        )
        time.sleep(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
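# Minimal usage sketch for the class above, using the module-level test data
# (any truthy keyword such as `describe=True` triggers the pretty-printer):
#
#   BankersAlgorithm(
#       test_claim_vector, test_allocated_res_table, test_maximum_claim_table
#   ).main(describe=True)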
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
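# What make_linear_from_emb achieves (illustrative check, not part of the script):
# the output projection becomes a bias-free Linear sharing the embedding matrix, so
# logits are computed as hidden_states @ embedding_weight.T.
#
#   emb = nn.Embedding(10, 4)
#   lm_head = make_linear_from_emb(emb)
#   assert lm_head.weight.data_ptr() == emb.weight.data_ptr()  # same storage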
def convert_fairseq_mamaaa_checkpoint_from_disk(checkpoint_path):
    """Load a fairseq M2M100 checkpoint and port it to the HF architecture."""
    mam_aaa = torch.load(checkpoint_path, map_location="cpu")
    args = mam_aaa["args"] or mam_aaa["cfg"]["model"]
    state_dict = mam_aaa["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    config = MaMaaaConfig(
        vocab_size=vocab_size,
        max_position_embeddings=1024,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        encoder_layerdrop=args.encoder_layerdrop,
        decoder_layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
    )

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MaMaaaForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
    model = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def squared_euclidean_distance(a, b):
    """Pairwise squared euclidean distances between the rows of a and the rows of b."""
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d
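# The function above relies on the expansion ||a_i - b_j||^2 = ||a_i||^2 - 2 a_i.b_j + ||b_j||^2,
# computing all pairwise distances with a single matmul. Illustrative sanity check:
#
#   a, b = np.random.rand(5, 3), np.random.rand(7, 3)
#   d = squared_euclidean_distance(a, b)
#   assert np.allclose(d[0, 0], np.sum((a[0] - b[0]) ** 2))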
def color_quantize(x, clusters):
    """Assign each RGB pixel of x to its nearest color cluster."""
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
class ImageGPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_normalize: bool = True,
        do_color_quantize: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def normalize(
        self,
        image: np.ndarray,
        data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        # rescale pixels to [-1, 1]
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: Optional[PILImageResampling] = None,
        do_normalize: Optional[bool] = None,
        do_color_quantize: Optional[bool] = None,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])

            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)

            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
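# Hypothetical usage sketch (a real checkpoint ships its own color clusters; the random
# palette here is only for illustration):
#
#   processor = ImageGPTImageProcessor(
#       clusters=np.random.rand(512, 3) * 2 - 1, size={"height": 32, "width": 32}
#   )
#   encoding = processor(images=some_pil_image, return_tensors="np")
#   # encoding["input_ids"] has shape (1, 32 * 32): one cluster id per pixel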
import os
def UpperCamelCase_( _snake_case : str = "input.txt" ):
"""simple docstring"""
with open(os.path.join(os.path.dirname(_snake_case ) , _snake_case ) ) as input_file:
__a =[
[int(_snake_case ) for element in line.split(',' )]
for line in input_file.readlines()
]
__a =len(_snake_case )
__a =len(matrix[0] )
__a =[[-1 for _ in range(_snake_case )] for _ in range(_snake_case )]
for i in range(_snake_case ):
__a =matrix[i][0]
for j in range(1 , _snake_case ):
for i in range(_snake_case ):
__a =minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , _snake_case ):
__a =min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
__a =min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
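# Worked example of the three passes above on [[131, 673], [201, 96]] (illustrative):
# column 0 seeds [131, 201]; the rightward pass on column 1 gives [804, 297]; the
# downward pass keeps 297 (since 804 + 96 > 297); the upward pass keeps 804 (since
# 297 + 673 > 804); the answer is min(804, 297) = 297, i.e. the path 201 -> 96.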
if __name__ == "__main__":
print(f'''{solution() = }''')
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class lowercase ( unittest.TestCase ):
def __init__( self , lowercase , lowercase=7 , lowercase=3 , lowercase=18 , lowercase=30 , lowercase=400 , lowercase=True , lowercase=None , lowercase=True , lowercase=False , lowercase=True , lowercase=True , lowercase=[0.5, 0.5, 0.5] , lowercase=[0.5, 0.5, 0.5] , ) -> Any:
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = num_channels
lowerCAmelCase = image_size
lowerCAmelCase = min_resolution
lowerCAmelCase = max_resolution
lowerCAmelCase = do_resize
lowerCAmelCase = size if size is not None else {"""height""": 18, """width""": 20}
lowerCAmelCase = do_thumbnail
lowerCAmelCase = do_align_axis
lowerCAmelCase = do_pad
lowerCAmelCase = do_normalize
lowerCAmelCase = image_mean
lowerCAmelCase = image_std
def _snake_case ( self ) -> List[str]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class lowercase ( _UpperCAmelCase , unittest.TestCase ):
_SCREAMING_SNAKE_CASE = DonutImageProcessor if is_vision_available() else None
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase = DonutImageProcessingTester(self )
@property
def _snake_case ( self ) -> List[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def _snake_case ( self ) -> List[Any]:
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase , """do_resize""" ) )
self.assertTrue(hasattr(lowercase , """size""" ) )
self.assertTrue(hasattr(lowercase , """do_thumbnail""" ) )
self.assertTrue(hasattr(lowercase , """do_align_long_axis""" ) )
self.assertTrue(hasattr(lowercase , """do_pad""" ) )
self.assertTrue(hasattr(lowercase , """do_normalize""" ) )
self.assertTrue(hasattr(lowercase , """image_mean""" ) )
self.assertTrue(hasattr(lowercase , """image_std""" ) )
def _snake_case ( self ) -> List[Any]:
lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 20} )
lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
# Previous config had dimensions in (width, height) order
lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {"""height""": 84, """width""": 42} )
def _snake_case ( self ) -> Tuple:
pass
@is_flaky()
def _snake_case ( self ) -> Any:
# Initialize image_processing
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , Image.Image )
# Test not batched input
lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
lowerCAmelCase = image_processing(lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
@is_flaky()
def _snake_case ( self ) -> Optional[int]:
# Initialize image_processing
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase , numpify=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , np.ndarray )
# Test not batched input
lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
lowerCAmelCase = image_processing(lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
@is_flaky()
def _snake_case ( self ) -> Any:
# Initialize image_processing
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase , torchify=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , torch.Tensor )
# Test not batched input
lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
lowerCAmelCase = image_processing(lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowercase ( _UpperCAmelCase ):
def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=True , lowercase=True , lowercase=True , lowercase=True , lowercase=False , lowercase=False , lowercase=False , lowercase=2 , lowercase=99 , lowercase=0 , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=0.1 , lowercase=0.1 , lowercase=512 , lowercase=12 , lowercase=2 , lowercase=0.02 , lowercase=3 , lowercase=4 , lowercase="last" , lowercase=None , lowercase=None , ) -> int:
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = seq_length
lowerCAmelCase = is_training
lowerCAmelCase = use_input_lengths
lowerCAmelCase = use_token_type_ids
lowerCAmelCase = use_labels
lowerCAmelCase = gelu_activation
lowerCAmelCase = sinusoidal_embeddings
lowerCAmelCase = causal
lowerCAmelCase = asm
lowerCAmelCase = n_langs
lowerCAmelCase = vocab_size
lowerCAmelCase = n_special
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = type_sequence_label_size
lowerCAmelCase = initializer_range
lowerCAmelCase = num_labels
lowerCAmelCase = num_choices
lowerCAmelCase = summary_type
lowerCAmelCase = use_proj
lowerCAmelCase = scope
def _snake_case ( self ) -> int:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase = None
if self.use_input_lengths:
lowerCAmelCase = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowerCAmelCase = None
if self.use_token_type_ids:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase = ids_tensor([self.batch_size] , 2 ).float()
lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _snake_case ( self ) -> List[Any]:
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> Any:
lowerCAmelCase = FlaubertModel(config=lowercase )
model.to(lowercase )
model.eval()
lowerCAmelCase = model(lowercase , lengths=lowercase , langs=lowercase )
lowerCAmelCase = model(lowercase , langs=lowercase )
lowerCAmelCase = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> Tuple:
lowerCAmelCase = FlaubertWithLMHeadModel(lowercase )
model.to(lowercase )
model.eval()
lowerCAmelCase = model(lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> str:
lowerCAmelCase = FlaubertForQuestionAnsweringSimple(lowercase )
model.to(lowercase )
model.eval()
lowerCAmelCase = model(lowercase )
lowerCAmelCase = model(lowercase , start_positions=lowercase , end_positions=lowercase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> Dict:
lowerCAmelCase = FlaubertForQuestionAnswering(lowercase )
model.to(lowercase )
model.eval()
lowerCAmelCase = model(lowercase )
lowerCAmelCase = model(
lowercase , start_positions=lowercase , end_positions=lowercase , cls_index=lowercase , is_impossible=lowercase , p_mask=lowercase , )
lowerCAmelCase = model(
lowercase , start_positions=lowercase , end_positions=lowercase , cls_index=lowercase , is_impossible=lowercase , )
((lowerCAmelCase) , ) = result_with_labels.to_tuple()
lowerCAmelCase = model(lowercase , start_positions=lowercase , end_positions=lowercase )
((lowerCAmelCase) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> int:
lowerCAmelCase = FlaubertForSequenceClassification(lowercase )
model.to(lowercase )
model.eval()
lowerCAmelCase = model(lowercase )
lowerCAmelCase = model(lowercase , labels=lowercase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> int:
lowerCAmelCase = self.num_labels
lowerCAmelCase = FlaubertForTokenClassification(lowercase )
model.to(lowercase )
model.eval()
lowerCAmelCase = model(lowercase , attention_mask=lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> Tuple:
lowerCAmelCase = self.num_choices
lowerCAmelCase = FlaubertForMultipleChoice(config=lowercase )
model.to(lowercase )
model.eval()
lowerCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase = model(
lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class lowercase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
_SCREAMING_SNAKE_CASE = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
_SCREAMING_SNAKE_CASE = (
{
'feature-extraction': FlaubertModel,
'fill-mask': FlaubertWithLMHeadModel,
'question-answering': FlaubertForQuestionAnsweringSimple,
'text-classification': FlaubertForSequenceClassification,
'token-classification': FlaubertForTokenClassification,
'zero-shot': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[int]:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _snake_case ( self , lowercase , lowercase , lowercase=False ) -> Optional[Any]:
lowerCAmelCase = super()._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
lowerCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase )
lowerCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase )
return inputs_dict
def _snake_case ( self ) -> List[str]:
lowerCAmelCase = FlaubertModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=lowercase , emb_dim=37 )
def _snake_case ( self ) -> Tuple:
self.config_tester.run_common_tests()
def _snake_case ( self ) -> Tuple:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*lowercase )
def _snake_case ( self ) -> Tuple:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*lowercase )
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*lowercase )
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*lowercase )
def _snake_case ( self ) -> Any:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*lowercase )
def _snake_case ( self ) -> Any:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*lowercase )
def _snake_case ( self ) -> Any:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*lowercase )
@slow
def _snake_case ( self ) -> Tuple:
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = FlaubertModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
@slow
@require_torch_gpu
def _snake_case ( self ) -> List[Any]:
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
lowerCAmelCase = True
lowerCAmelCase = model_class(config=lowercase )
lowerCAmelCase = self._prepare_for_class(lowercase , lowercase )
lowerCAmelCase = torch.jit.trace(
lowercase , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(lowercase , os.path.join(lowercase , """traced_model.pt""" ) )
lowerCAmelCase = torch.jit.load(os.path.join(lowercase , """traced_model.pt""" ) , map_location=lowercase )
loaded(inputs_dict["""input_ids"""].to(lowercase ) , inputs_dict["""attention_mask"""].to(lowercase ) )
@require_torch
class lowercase ( unittest.TestCase ):
@slow
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase = FlaubertModel.from_pretrained("""flaubert/flaubert_base_cased""" )
lowerCAmelCase = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
with torch.no_grad():
lowerCAmelCase = model(lowercase )[0]
lowerCAmelCase = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , lowercase )
lowerCAmelCase = torch.tensor(
[[[-2.6_251, -1.4_298, -0.0_227], [-2.8_510, -1.6_387, 0.2_258], [-2.8_114, -1.1_832, -0.3_066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase , atol=1e-4 ) )
"""simple docstring"""
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2


class Dictionary:
    """A mapping from symbols to consecutive integers (fairseq-style)."""

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)

    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices
    @classmethod
    def load(cls, f):
        """Load the dictionary from a text file."""
        d = cls()
        d.add_from_file(f)
        return d

    def add_symbol(self, word, n=1, overwrite=False):
        """Add a word to the dictionary."""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def _load_meta(self, lines):
        return 0
    def add_from_file(self, f):
        """Load a pre-existing dictionary from a text file and add its symbols to this instance."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines)

        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")
def rewrite_dict_keys(d):
    """(1) remove the word-breaking symbol, (2) add a word-ending symbol where the word is not broken up,
    e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    """
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
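# Illustrative rewrite (assuming the function above): BPE continuation markers are
# dropped, word-final tokens gain </w>, and the four special tokens are restored:
#
#   rewrite_dict_keys({"le@@": 5, "er": 7, "<s>": 0})
#   # -> {"le": 5, "er</w>": 7, "<s>": 0}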
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f"path {biogpt_checkpoint_path} does not exist!")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f"path to the file {checkpoint_file} does not exist!")
    chkpt = torch.load(checkpoint_file, map_location="cpu")

    args = chkpt["cfg"]["model"]

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
    if not os.path.isfile(dict_file):
        raise ValueError(f"path to the file {dict_file} does not exist!")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
    print(f"Generating {src_vocab_file} of {src_vocab_size} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f"path to the file {bpecodes_file} does not exist!")
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")
    model_conf = {
        "activation_dropout": args["activation_dropout"],
        "architectures": ["BioGptForCausalLM"],
        "attention_probs_dropout_prob": args["attention_dropout"],
        "bos_token_id": 0,
        "eos_token_id": 2,
        "hidden_act": args["activation_fn"],
        "hidden_dropout_prob": args["dropout"],
        "hidden_size": args["decoder_embed_dim"],
        "initializer_range": 0.02,
        "intermediate_size": args["decoder_ffn_embed_dim"],
        "layer_norm_eps": 1e-12,
        "layerdrop": args["decoder_layerdrop"],
        "max_position_embeddings": args["max_target_positions"],
        "model_type": "biogpt",
        "num_attention_heads": args["decoder_attention_heads"],
        "num_hidden_layers": args["decoder_layers"],
        "pad_token_id": 1,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_decoder_input_output_embed"],
        "vocab_size": src_vocab_size,
    }

    # good hparam defaults to start with
    print(f"Generating {biogpt_model_config_file}")
    with open(biogpt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)
    tokenizer_conf = {
        "bos_token": "<s>",
        "eos_token": "</s>",
        "model_max_length": 1024,
        "pad_token": "<pad>",
        "special_tokens_map_file": None,
        "tokenizer_class": "BioGptTokenizer",
        "unk_token": "<unk>",
    }
    print(f"Generating {biogpt_tokenizer_config_file}")
    with open(biogpt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model_state_dict = chkpt["model"]

    # remove unneeded keys
    ignore_keys = [
        "decoder.version",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith("output_projection.weight"):
            model_state_dict["output_projection.weight"] = model_state_dict.pop(layer_name)
        else:
            model_state_dict[layer_name.replace("decoder", "biogpt")] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--biogpt_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"""
""" bpecodes, etc."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
"""simple docstring"""
def solution(power: int = 1000) -> int:
    """Return the sum of the digits of the number 2**power."""
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r
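# Worked example: 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26, so solution(15) == 26.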
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class A :
def __init__(self : Dict , __UpperCAmelCase : int , __UpperCAmelCase : Any=1_3 , __UpperCAmelCase : Dict=2 , __UpperCAmelCase : Tuple=2_4 , __UpperCAmelCase : List[str]=1_6 , __UpperCAmelCase : List[Any]=True , __UpperCAmelCase : Union[str, Any]=True , __UpperCAmelCase : Dict=3_2 , __UpperCAmelCase : str=5 , __UpperCAmelCase : Dict=4 , __UpperCAmelCase : Tuple=3_7 , __UpperCAmelCase : int="gelu" , __UpperCAmelCase : Dict=0.1 , __UpperCAmelCase : Dict=0.1 , __UpperCAmelCase : Optional[int]=1_0 , __UpperCAmelCase : Optional[Any]=0.02 , __UpperCAmelCase : List[Any]=None , __UpperCAmelCase : Dict=2 , __UpperCAmelCase : Tuple=2 , ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = parent
UpperCAmelCase__ = batch_size
UpperCAmelCase__ = patch_size
UpperCAmelCase__ = max_length
UpperCAmelCase__ = num_mel_bins
UpperCAmelCase__ = is_training
UpperCAmelCase__ = use_labels
UpperCAmelCase__ = hidden_size
UpperCAmelCase__ = num_hidden_layers
UpperCAmelCase__ = num_attention_heads
UpperCAmelCase__ = intermediate_size
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = hidden_dropout_prob
UpperCAmelCase__ = attention_probs_dropout_prob
UpperCAmelCase__ = type_sequence_label_size
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = scope
UpperCAmelCase__ = frequency_stride
UpperCAmelCase__ = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
UpperCAmelCase__ = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
UpperCAmelCase__ = (self.max_length - self.patch_size) // self.time_stride + 1
UpperCAmelCase__ = frequency_out_dimension * time_out_dimension
UpperCAmelCase__ = num_patches + 2
def lowercase_ (self : str ) -> str:
"""simple docstring"""
UpperCAmelCase__ = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
UpperCAmelCase__ = None
if self.use_labels:
UpperCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ = self.get_config()
return config, input_values, labels
def lowercase_ (self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
def lowercase_ (self : Union[str, Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = ASTModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
UpperCAmelCase__ = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_values, labels = config_and_inputs
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict
@require_torch
class A ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
__UpperCAmelCase : List[Any] = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
__UpperCAmelCase : List[str] = (
{'audio-classification': ASTForAudioClassification, 'feature-extraction': ASTModel}
if is_torch_available()
else {}
)
__UpperCAmelCase : Optional[Any] = False
__UpperCAmelCase : Tuple = False
__UpperCAmelCase : Any = False
__UpperCAmelCase : Any = False
def lowercase_ (self : Union[str, Any] , __UpperCAmelCase : Any , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Tuple ) -> Any:
"""simple docstring"""
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def lowercase_ (self : Optional[Any] ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = ASTModelTester(self )
UpperCAmelCase__ = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=3_7 )
def lowercase_ (self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="AST does not use inputs_embeds" )
def lowercase_ (self : Dict ) -> List[str]:
"""simple docstring"""
pass
def lowercase_ (self : int ) -> Any:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ = model_class(__UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCAmelCase , nn.Linear ) )
def lowercase_ (self : str ) -> str:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ = model_class(__UpperCAmelCase )
UpperCAmelCase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ = [*signature.parameters.keys()]
UpperCAmelCase__ = ["input_values"]
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
def lowercase_ (self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
@slow
def lowercase_ (self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ = ASTModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def prepare_audio():
    filepath = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset"
    )
    audio, sampling_rate = torchaudio.load(filepath)
    return audio, sampling_rate
@require_torch
@require_torchaudio
class A ( unittest.TestCase ):
@cached_property
def lowercase_ (self : List[Any] ) -> List[str]:
"""simple docstring"""
return (
ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" )
if is_torchaudio_available()
else None
)
@slow
def lowercase_ (self : Optional[int] ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = self.default_feature_extractor
UpperCAmelCase__ = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" ).to(__UpperCAmelCase )
UpperCAmelCase__ = self.default_feature_extractor
UpperCAmelCase__ , UpperCAmelCase__ = prepare_audio()
UpperCAmelCase__ = audio.squeeze().numpy()
UpperCAmelCase__ = feature_extractor(__UpperCAmelCase , sampling_rate=__UpperCAmelCase , return_tensors="pt" ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase__ = model(**__UpperCAmelCase )
# verify the logits
UpperCAmelCase__ = torch.Size((1, 5_2_7) )
self.assertEqual(outputs.logits.shape , __UpperCAmelCase )
UpperCAmelCase__ = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) )
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return

    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)
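# Sanity sketch: the backtracking above enumerates the same k-subsets, in the same
# order, as itertools.combinations over 1..n:
#
#   from itertools import combinations
#   assert generate_all_combinations(4, 2) == [list(c) for c in combinations(range(1, 5), 2)]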
if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
print_all_state(total_list)
'''simple docstring'''
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """Rename the keys of the pre-trained model to match the HF implementation."""
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
    return flax_key_tuple, flax_tensor
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = WEIGHTS_NAME )-> Any:
'''simple docstring'''
_UpperCAmelCase : List[str] = convert_file_size_to_int(lowerCAmelCase_ )
_UpperCAmelCase : Optional[Any] = []
_UpperCAmelCase : str = {}
_UpperCAmelCase : Optional[int] = 0
_UpperCAmelCase : List[Any] = 0
os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ )
with gfile.GFile(switch_checkpoint_path + """/checkpoint""" , """rb""" ) as fp:
_UpperCAmelCase : List[str] = serialization.msgpack_restore(fp.read() )["""optimizer"""]["""target"""]
_UpperCAmelCase : Dict = flatten_dict(lowerCAmelCase_ , sep="""/""" )
_UpperCAmelCase : List[str] = {}
for layer in checkpoint_info.keys():
_UpperCAmelCase : int = get_key_and_tensorstore_dict(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
if curr_real_layer_name in all_layers:
_UpperCAmelCase : str = content
else:
_UpperCAmelCase : Any = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
_UpperCAmelCase : str = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
_UpperCAmelCase : List[str] = torch.tensor(lowerCAmelCase_ )
_UpperCAmelCase : Union[str, Any] = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
_UpperCAmelCase : List[Any] = rename_base_flax_keys(tuple(key.split("""/""" ) ) , lowerCAmelCase_ )
_UpperCAmelCase : Optional[int] = """/""".join(lowerCAmelCase_ )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
_UpperCAmelCase : List[str] = os.path.join(
lowerCAmelCase_ , weights_name.replace(""".bin""" , F'''-{len(lowerCAmelCase_ )+1:05d}-of-???.bin''' ) )
rename_and_save_block(lowerCAmelCase_ , lowerCAmelCase_ )
sharded_state_dicts.append(current_block.keys() )
del current_block
_UpperCAmelCase : List[Any] = {}
_UpperCAmelCase : Dict = 0
_UpperCAmelCase : Tuple = raw_weights.to(getattr(lowerCAmelCase_ , lowerCAmelCase_ ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
_UpperCAmelCase : Tuple = os.path.join(lowerCAmelCase_ , weights_name.replace(""".bin""" , F'''-{len(lowerCAmelCase_ )+1:05d}-of-???.bin''' ) )
rename_and_save_block(lowerCAmelCase_ , lowerCAmelCase_ )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(lowerCAmelCase_ ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
_UpperCAmelCase : List[str] = {}
_UpperCAmelCase : str = {}
for idx, shard in enumerate(lowerCAmelCase_ ):
_UpperCAmelCase : Optional[int] = weights_name.replace(
            """.bin""" , F'''-{idx+1:05d}-of-{len(lowerCAmelCase_ ):05d}.bin''' )  # i.e. len(sharded_state_dicts)
_UpperCAmelCase : Optional[int] = os.path.join(lowerCAmelCase_ , weights_name.replace(""".bin""" , F'''-{idx+1:05d}-of-???.bin''' ) )
os.rename(lowerCAmelCase_ , os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) )
_UpperCAmelCase : Optional[int] = shard
for key in shard:
_UpperCAmelCase : List[Any] = shard_file
# Add the metadata
_UpperCAmelCase : str = {"""total_size""": total_size}
_UpperCAmelCase : Tuple = {"""metadata""": metadata, """weight_map""": weight_map}
with open(os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) , """w""" , encoding="""utf-8""" ) as f:
_UpperCAmelCase : Tuple = json.dumps(lowerCAmelCase_ , indent=2 , sort_keys=lowerCAmelCase_ ) + """\n"""
f.write(lowerCAmelCase_ )
return metadata, index
if __name__ == "__main__":
A_ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--switch_t5x_checkpoint_path""",
default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600""",
type=str,
required=False,
help="""Path to a directory containing a folder per layer. Follows the original Google format.""",
)
parser.add_argument("""--max_shard_size""", default="""10GB""", required=False, help="""Max shard size""")
parser.add_argument("""--dtype""", default="""bfloat16""", type=str, required=False, help="""dtype of the saved model""")
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted""",
type=str,
required=False,
help="""Path to the output pytorch model.""",
)
A_ : str = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def snake_case_ ( )-> Tuple:
'''simple docstring'''
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
_UpperCAmelCase : Tuple = SwitchTransformersConfig.from_pretrained("""google/switch-base-8""" )
config.save_pretrained("""/home/arthur_huggingface_co/transformers/switch_converted""" )
_UpperCAmelCase : int = SwitchTransformersForConditionalGeneration.from_pretrained(
"""/home/arthur_huggingface_co/transformers/switch_converted""" , device_map="""auto""" )
_UpperCAmelCase : Dict = TaTokenizer.from_pretrained("""t5-small""" )
_UpperCAmelCase : int = """A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."""
_UpperCAmelCase : List[str] = tokenizer(lowerCAmelCase_ , return_tensors="""pt""" ).input_ids
_UpperCAmelCase : Any = model.generate(lowerCAmelCase_ , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) )
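# For reference, the sharding routine above returns an index with the standard
# Hugging Face sharded-checkpoint shape (field values illustrative):
#   {
#       "metadata":   {"total_size": <total bytes across shards>},
#       "weight_map": {"<parameter name>": "pytorch_model-00001-of-000NN.bin", ...},
#   }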
| 363
|
'''simple docstring'''
import math
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ )-> int:
'''simple docstring'''
_UpperCAmelCase : str = len(lowerCAmelCase_ )
_UpperCAmelCase : List[str] = int(math.floor(math.sqrt(lowerCAmelCase_ ) ) )
_UpperCAmelCase : int = 0
while arr[min(lowerCAmelCase_ , lowerCAmelCase_ ) - 1] < x:
_UpperCAmelCase : Optional[int] = step
step += int(math.floor(math.sqrt(lowerCAmelCase_ ) ) )
if prev >= n:
return -1
while arr[prev] < x:
_UpperCAmelCase : List[Any] = prev + 1
if prev == min(lowerCAmelCase_ , lowerCAmelCase_ ):
return -1
if arr[prev] == x:
return prev
return -1
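# Doctest-style illustration of the jump search above (values chosen by hand,
# not taken from the original module; the __main__ guard below calls the
# function under the name `jump_search`):
#   jump_search([0, 1, 2, 3, 5, 8, 13], 5)  -> 4
#   jump_search([0, 1, 2, 3, 5, 8, 13], 4)  -> -1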
if __name__ == "__main__":
A_ : str = input("""Enter numbers separated by a comma:\n""").strip()
A_ : Union[str, Any] = [int(item) for item in user_input.split(""",""")]
A_ : int = int(input("""Enter the number to be searched:\n"""))
A_ : Any = jump_search(arr, x)
if res == -1:
print("""Number not found!""")
else:
print(f"""Number {x} is at index {res}""")
| 349
| 0
|
"""simple docstring"""
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
"facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class lowercase ( lowerCamelCase__ ):
_SCREAMING_SNAKE_CASE = """encodec"""
def __init__( self , lowercase=[1.5, 3.0, 6.0, 12.0, 24.0] , lowercase=24_000 , lowercase=1 , lowercase=False , lowercase=None , lowercase=None , lowercase=128 , lowercase=32 , lowercase=1 , lowercase=[8, 5, 4, 2] , lowercase="weight_norm" , lowercase=7 , lowercase=7 , lowercase=3 , lowercase=2 , lowercase=True , lowercase="reflect" , lowercase=2 , lowercase=2 , lowercase=1.0 , lowercase=1_024 , lowercase=None , lowercase=True , **lowercase , ) -> List[str]:
lowerCAmelCase = target_bandwidths
lowerCAmelCase = sampling_rate
lowerCAmelCase = audio_channels
lowerCAmelCase = normalize
lowerCAmelCase = chunk_length_s
lowerCAmelCase = overlap
lowerCAmelCase = hidden_size
lowerCAmelCase = num_filters
lowerCAmelCase = num_residual_layers
lowerCAmelCase = upsampling_ratios
lowerCAmelCase = norm_type
lowerCAmelCase = kernel_size
lowerCAmelCase = last_kernel_size
lowerCAmelCase = residual_kernel_size
lowerCAmelCase = dilation_growth_rate
lowerCAmelCase = use_causal_conv
lowerCAmelCase = pad_mode
lowerCAmelCase = compress
lowerCAmelCase = num_lstm_layers
lowerCAmelCase = trim_right_ratio
lowerCAmelCase = codebook_size
lowerCAmelCase = codebook_dim if codebook_dim is not None else hidden_size
lowerCAmelCase = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
                f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`, got {self.norm_type}' )
        super().__init__(**lowercase )
@property
def _snake_case ( self ) -> Optional[int]:
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def _snake_case ( self ) -> Optional[int]:
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def _snake_case ( self ) -> int:
lowerCAmelCase = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def _snake_case ( self ) -> int:
return int(1_000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
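# Worked example with the defaults above (sampling_rate=24_000,
# upsampling_ratios=[8, 5, 4, 2], target_bandwidths[-1]=24.0), using the
# upstream property names (chunk_length, frame_rate, num_quantizers):
#   hop_length     = 8 * 5 * 4 * 2                  = 320
#   frame_rate     = ceil(24_000 / 320)             = 75
#   num_quantizers = int(1_000 * 24.0 // (75 * 10)) = 32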
| 46
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
__snake_case = None
__snake_case = logging.get_logger(__name__)
__snake_case = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
__snake_case = {
'''vocab_file''': {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''',
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'''
),
},
'''tokenizer_file''': {
'''google/bigbird-roberta-base''': (
'''https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'''
),
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'''
),
},
}
__snake_case = {
'''google/bigbird-roberta-base''': 40_96,
'''google/bigbird-roberta-large''': 40_96,
'''google/bigbird-base-trivia-itc''': 40_96,
}
__snake_case = '''▁'''
class __snake_case ( lowerCamelCase__ ):
__lowerCamelCase : Dict = VOCAB_FILES_NAMES
__lowerCamelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase : List[str] = BigBirdTokenizer
__lowerCamelCase : Any = ["""input_ids""", """attention_mask"""]
__lowerCamelCase : List[int] = []
def __init__( self , snake_case__=None , snake_case__=None , snake_case__="<unk>" , snake_case__="<s>" , snake_case__="</s>" , snake_case__="<pad>" , snake_case__="[SEP]" , snake_case__="[MASK]" , snake_case__="[CLS]" , **snake_case__ , ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : Any =AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else bos_token
UpperCAmelCase : Optional[int] =AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else eos_token
UpperCAmelCase : List[str] =AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else unk_token
UpperCAmelCase : Union[str, Any] =AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else pad_token
UpperCAmelCase : int =AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else cls_token
UpperCAmelCase : str =AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
UpperCAmelCase : List[Any] =AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else mask_token
super().__init__(
snake_case__ , tokenizer_file=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , pad_token=snake_case__ , cls_token=snake_case__ , mask_token=snake_case__ , **snake_case__ , )
UpperCAmelCase : Tuple =vocab_file
UpperCAmelCase : Optional[int] =False if not self.vocab_file else True
def UpperCAmelCase__ ( self , snake_case__ , snake_case__ = None ) -> List[int]:
'''simple docstring'''
UpperCAmelCase : int =[self.sep_token_id]
UpperCAmelCase : Optional[int] =[self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCAmelCase__ ( self , snake_case__ , snake_case__ = None , snake_case__ = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is None:
return [1] + ([0] * len(snake_case__ )) + [1]
return [1] + ([0] * len(snake_case__ )) + [1] + ([0] * len(snake_case__ )) + [1]
def UpperCAmelCase__ ( self , snake_case__ , snake_case__ = None ) -> List[int]:
'''simple docstring'''
UpperCAmelCase : Optional[Any] =[self.sep_token_id]
UpperCAmelCase : Optional[int] =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase__ ( self , snake_case__ , snake_case__ = None ) -> Tuple[str]:
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(snake_case__ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCAmelCase : Optional[int] =os.path.join(
snake_case__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ):
copyfile(self.vocab_file , snake_case__ )
return (out_vocab_file,)
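# For reference, the special-token layouts produced by the methods above follow
# the BERT-style convention BigBird uses:
#   single sequence: [CLS] A [SEP]
#   pair:            [CLS] A [SEP] B [SEP]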
| 348
| 0
|
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : list[int]) -> int:
'''simple docstring'''
if not numbers:
return 0
if not isinstance(_lowerCamelCase , (list, tuple)) or not all(
        isinstance(number , int) for number in numbers):
raise ValueError("numbers must be an iterable of integers")
__UpperCamelCase : List[str] = numbers[0]
for i in range(1 , len(_lowerCamelCase)):
# update the maximum and minimum subarray products
__UpperCamelCase : Union[str, Any] = numbers[i]
if number < 0:
__UpperCamelCase : Union[str, Any] = min_till_now, max_till_now
__UpperCamelCase : Optional[Any] = max(_lowerCamelCase , max_till_now * number)
__UpperCamelCase : Any = min(_lowerCamelCase , min_till_now * number)
# update the maximum product found till now
__UpperCamelCase : Any = max(_lowerCamelCase , _lowerCamelCase)
return max_prod
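# Illustrative inputs for the maximum-product-subarray routine above
# (hand-checked, not taken from the original module):
#   [2, 3, -2, 4] -> 6    (the subarray [2, 3])
#   [-2, 0, -1]   -> 0    (any subarray containing the 0)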
| 361
|
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[int]) -> Dict:
'''simple docstring'''
return EnvironmentCommand()
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Dict) -> Dict:
'''simple docstring'''
return EnvironmentCommand(args.accelerate_config_file)
class lowerCamelCase__ ( __lowercase):
'''simple docstring'''
@staticmethod
def _lowerCamelCase ( a :ArgumentParser ) -> str:
__UpperCamelCase : List[Any] = parser.add_parser("env" )
download_parser.set_defaults(func=a )
download_parser.add_argument(
"--accelerate-config_file" , default=a , help="The accelerate config file to use for the default values in the launching script." , )
download_parser.set_defaults(func=a )
def __init__( self :Tuple , a :Dict , *a :List[str] ) -> None:
__UpperCamelCase : List[str] = accelerate_config_file
def _lowerCamelCase ( self :int ) -> Dict:
__UpperCamelCase : int = "not installed"
if is_safetensors_available():
import safetensors
__UpperCamelCase : List[str] = safetensors.__version__
elif importlib.util.find_spec("safetensors" ) is not None:
import safetensors
            __UpperCamelCase : Optional[Any] = f'{safetensors.__version__} but is ignored because the installed PyTorch version is too old.'
__UpperCamelCase : List[str] = "not installed"
__UpperCamelCase : List[str] = "not found"
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
__UpperCamelCase : Tuple = accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(a ):
__UpperCamelCase : Dict = load_config_from_file(self._accelerate_config_file ).to_dict()
__UpperCamelCase : int = (
"\n".join([f'\t- {prop}: {val}' for prop, val in accelerate_config.items()] )
if isinstance(a , a )
else f'\t{accelerate_config}'
)
__UpperCamelCase : List[Any] = "not installed"
__UpperCamelCase : Dict = "NA"
if is_torch_available():
import torch
__UpperCamelCase : Optional[int] = torch.__version__
__UpperCamelCase : Optional[Any] = torch.cuda.is_available()
__UpperCamelCase : Dict = "not installed"
__UpperCamelCase : str = "NA"
if is_tf_available():
import tensorflow as tf
__UpperCamelCase : Optional[Any] = tf.__version__
try:
# deprecated in v2.1
__UpperCamelCase : Dict = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
__UpperCamelCase : Optional[Any] = bool(tf.config.list_physical_devices("GPU" ) )
__UpperCamelCase : List[Any] = "not installed"
__UpperCamelCase : Any = "not installed"
__UpperCamelCase : Tuple = "not installed"
__UpperCamelCase : Optional[int] = "NA"
if is_flax_available():
import flax
import jax
import jaxlib
__UpperCamelCase : int = flax.__version__
__UpperCamelCase : Any = jax.__version__
__UpperCamelCase : Optional[int] = jaxlib.__version__
__UpperCamelCase : List[Any] = jax.lib.xla_bridge.get_backend().platform
__UpperCamelCase : Optional[Any] = {
"`transformers` version": version,
"Platform": platform.platform(),
"Python version": platform.python_version(),
"Huggingface_hub version": huggingface_hub.__version__,
"Safetensors version": f'{safetensors_version}',
"Accelerate version": f'{accelerate_version}',
"Accelerate config": f'{accelerate_config_str}',
"PyTorch version (GPU?)": f'{pt_version} ({pt_cuda_available})',
"Tensorflow version (GPU?)": f'{tf_version} ({tf_cuda_available})',
"Flax version (CPU?/GPU?/TPU?)": f'{flax_version} ({jax_backend})',
"Jax version": f'{jax_version}',
"JaxLib version": f'{jaxlib_version}',
"Using GPU in script?": "<fill in>",
"Using distributed or parallel set-up in script?": "<fill in>",
}
print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n" )
print(self.format_dict(a ) )
return info
@staticmethod
def _lowerCamelCase ( a :str ) -> int:
return "\n".join([f'- {prop}: {val}' for prop, val in d.items()] ) + "\n"
| 151
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
_lowerCAmelCase : Any = {
'''alibaba-damo/mgp-str-base''': '''https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json''',
}
class __magic_name__ ( _lowerCamelCase ):
"""simple docstring"""
__UpperCamelCase = '''mgp-str'''
def __init__( self :Optional[int] , snake_case :Optional[Any]=[32, 128] , snake_case :Dict=4 , snake_case :Tuple=3 , snake_case :int=27 , snake_case :List[Any]=38 , snake_case :Any=50_257 , snake_case :Dict=30_522 , snake_case :str=768 , snake_case :Union[str, Any]=12 , snake_case :Dict=12 , snake_case :str=4.0 , snake_case :str=True , snake_case :Optional[int]=False , snake_case :Optional[int]=1e-5 , snake_case :Union[str, Any]=0.0 , snake_case :Any=0.0 , snake_case :Dict=0.0 , snake_case :List[Any]=False , snake_case :str=0.02 , **snake_case :Optional[Any] , ):
'''simple docstring'''
        super().__init__(**snake_case )
A_ : Optional[Any] = image_size
A_ : List[Any] = patch_size
A_ : Optional[int] = num_channels
A_ : Dict = max_token_length
A_ : str = num_character_labels
A_ : int = num_bpe_labels
A_ : str = num_wordpiece_labels
A_ : List[Any] = hidden_size
A_ : List[Any] = num_hidden_layers
A_ : str = num_attention_heads
A_ : List[str] = mlp_ratio
A_ : Optional[Any] = distilled
A_ : Tuple = layer_norm_eps
A_ : int = drop_rate
A_ : List[Any] = qkv_bias
A_ : Any = attn_drop_rate
A_ : Optional[int] = drop_path_rate
A_ : List[Any] = output_aa_attentions
A_ : int = initializer_range
| 300
|
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> str:
'''simple docstring'''
lowercase : Union[str, Any] = [False] * len(__magic_name__ )
lowercase : Optional[int] = []
queue.append(__magic_name__ )
lowercase : int = True
while queue:
lowercase : Union[str, Any] = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(__magic_name__ )
lowercase : Dict = True
lowercase : List[str] = u
return visited[t]
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> Tuple:
'''simple docstring'''
lowercase : List[str] = [-1] * (len(__magic_name__ ))
lowercase : Tuple = 0
while bfs(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
lowercase : Any = float('''Inf''' )
lowercase : str = sink
while s != source:
# Find the minimum value in select path
lowercase : Any = min(__magic_name__ , graph[parent[s]][s] )
lowercase : Dict = parent[s]
max_flow += path_flow
lowercase : Union[str, Any] = sink
while v != source:
lowercase : List[str] = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
lowercase : Optional[int] = parent[v]
return max_flow
lowerCAmelCase_ = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
lowerCAmelCase_ , lowerCAmelCase_ = 0, 5
print(ford_fulkerson(graph, source, sink))
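# The capacity matrix above is the classic CLRS max-flow example; the maximum
# flow from source 0 to sink 5 is 23, so the program prints 23.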
| 308
| 0
|
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
lowercase : List[Any] = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
lowercase : Dict = MBartaaTokenizer
lowercase : Optional[int] = MBartaaTokenizerFast
lowercase : Any = True
lowercase : Optional[Any] = True
def __lowerCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__UpperCamelCase : str = MBartaaTokenizer(__UpperCamelCase , src_lang="en_XX" , tgt_lang="ro_RO" , keep_accents=__UpperCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def __lowerCamelCase ( self ) -> Dict:
'''simple docstring'''
__UpperCamelCase : Dict = "<s>"
__UpperCamelCase : Dict = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCamelCase ) , __UpperCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCamelCase ) , __UpperCamelCase )
def __lowerCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
__UpperCamelCase : List[str] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "<mask>" )
self.assertEqual(len(__UpperCamelCase ) , 10_54 )
def __lowerCamelCase ( self ) -> Tuple:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 10_54 )
def __lowerCamelCase ( self ) -> int:
'''simple docstring'''
__UpperCamelCase : List[Any] = MBartaaTokenizer(__UpperCamelCase , src_lang="en_XX" , tgt_lang="ro_RO" , keep_accents=__UpperCamelCase )
__UpperCamelCase : List[str] = tokenizer.tokenize("This is a test" )
self.assertListEqual(__UpperCamelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
__UpperCamelCase : str = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__UpperCamelCase , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."] , )
__UpperCamelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(__UpperCamelCase )
self.assertListEqual(
__UpperCamelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
__UpperCamelCase : Tuple = tokenizer.convert_ids_to_tokens(__UpperCamelCase )
self.assertListEqual(
__UpperCamelCase , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."] , )
@slow
def __lowerCamelCase ( self ) -> List[str]:
'''simple docstring'''
__UpperCamelCase : int = {"input_ids": [[25_00_04, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [25_00_04, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_00_04, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCamelCase , model_name="facebook/mbart-large-50" , revision="d3913889c59cd5c9e456b269c376325eabad57e2" , )
def __lowerCamelCase ( self ) -> Any:
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
__UpperCamelCase : List[Any] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart50", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__UpperCamelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(__UpperCamelCase , **__UpperCamelCase )
__UpperCamelCase : List[str] = self.tokenizer_class.from_pretrained(__UpperCamelCase , **__UpperCamelCase )
__UpperCamelCase : Any = tempfile.mkdtemp()
__UpperCamelCase : Optional[Any] = tokenizer_r.save_pretrained(__UpperCamelCase )
__UpperCamelCase : Any = tokenizer_p.save_pretrained(__UpperCamelCase )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
__UpperCamelCase : Optional[Any] = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f )
self.assertSequenceEqual(__UpperCamelCase , __UpperCamelCase )
# Checks everything loads correctly in the same way
__UpperCamelCase : Dict = tokenizer_r.from_pretrained(__UpperCamelCase )
__UpperCamelCase : int = tokenizer_p.from_pretrained(__UpperCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__UpperCamelCase , __UpperCamelCase ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(__UpperCamelCase )
# Save tokenizer rust, legacy_format=True
__UpperCamelCase : Optional[Any] = tempfile.mkdtemp()
__UpperCamelCase : str = tokenizer_r.save_pretrained(__UpperCamelCase , legacy_format=__UpperCamelCase )
__UpperCamelCase : int = tokenizer_p.save_pretrained(__UpperCamelCase )
# Checks it save with the same files
self.assertSequenceEqual(__UpperCamelCase , __UpperCamelCase )
# Checks everything loads correctly in the same way
__UpperCamelCase : Optional[int] = tokenizer_r.from_pretrained(__UpperCamelCase )
__UpperCamelCase : Optional[Any] = tokenizer_p.from_pretrained(__UpperCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__UpperCamelCase , __UpperCamelCase ) )
shutil.rmtree(__UpperCamelCase )
# Save tokenizer rust, legacy_format=False
__UpperCamelCase : List[Any] = tempfile.mkdtemp()
__UpperCamelCase : Union[str, Any] = tokenizer_r.save_pretrained(__UpperCamelCase , legacy_format=__UpperCamelCase )
__UpperCamelCase : List[Any] = tokenizer_p.save_pretrained(__UpperCamelCase )
# Checks it saved the tokenizer.json file
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
__UpperCamelCase : Tuple = tokenizer_r.from_pretrained(__UpperCamelCase )
__UpperCamelCase : Dict = tokenizer_p.from_pretrained(__UpperCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__UpperCamelCase , __UpperCamelCase ) )
shutil.rmtree(__UpperCamelCase )
@require_torch
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
lowercase : Optional[Any] = 'facebook/mbart-large-50-one-to-many-mmt'
lowercase : Union[str, Any] = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
]
lowercase : List[Any] = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
lowercase : Union[str, Any] = [EN_CODE, 8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2]
@classmethod
def __lowerCamelCase ( cls ) -> int:
'''simple docstring'''
__UpperCamelCase : MBartaaTokenizer = MBartaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="en_XX" , tgt_lang="ro_RO" )
__UpperCamelCase : Union[str, Any] = 1
return cls
def __lowerCamelCase ( self ) -> List[Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"] , 25_00_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"] , 25_00_04 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"] , 25_00_20 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["mr_IN"] , 25_00_38 )
def __lowerCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
__UpperCamelCase : Optional[Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __UpperCamelCase )
def __lowerCamelCase ( self ) -> List[str]:
'''simple docstring'''
self.assertIn(__UpperCamelCase , self.tokenizer.all_special_ids )
__UpperCamelCase : Tuple = [RO_CODE, 8_84, 90_19, 96, 9, 9_16, 8_67_92, 36, 1_87_43, 1_55_96, 5, 2]
__UpperCamelCase : Optional[int] = self.tokenizer.decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase )
__UpperCamelCase : Optional[Any] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__UpperCamelCase )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
self.assertNotIn(self.tokenizer.eos_token , __UpperCamelCase )
def __lowerCamelCase ( self ) -> List[str]:
'''simple docstring'''
__UpperCamelCase : Tuple = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0] , str )
__UpperCamelCase : int = 10
__UpperCamelCase : List[Any] = self.tokenizer(__UpperCamelCase , max_length=__UpperCamelCase , truncation=__UpperCamelCase ).input_ids[0]
self.assertEqual(ids[0] , __UpperCamelCase )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase )
def __lowerCamelCase ( self ) -> List[str]:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"] ) , [25_00_53, 25_00_01] )
def __lowerCamelCase ( self ) -> List[Any]:
'''simple docstring'''
__UpperCamelCase : List[Any] = tempfile.mkdtemp()
__UpperCamelCase : Union[str, Any] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__UpperCamelCase )
__UpperCamelCase : str = MBartaaTokenizer.from_pretrained(__UpperCamelCase )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __UpperCamelCase )
@require_torch
def __lowerCamelCase ( self ) -> Any:
'''simple docstring'''
__UpperCamelCase : List[Any] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__UpperCamelCase , return_tensors="pt" )
__UpperCamelCase : Dict = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def __lowerCamelCase ( self ) -> List[str]:
'''simple docstring'''
__UpperCamelCase : List[str] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
__UpperCamelCase : Any = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
__UpperCamelCase : List[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __UpperCamelCase )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def __lowerCamelCase ( self ) -> str:
'''simple docstring'''
__UpperCamelCase : Union[str, Any] = self.tokenizer(self.src_text , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=3 , return_tensors="pt" )
__UpperCamelCase : Dict = self.tokenizer(
text_target=self.tgt_text , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=10 , return_tensors="pt" )
__UpperCamelCase : int = targets["input_ids"]
__UpperCamelCase : Dict = shift_tokens_right(__UpperCamelCase , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def __lowerCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
__UpperCamelCase : Union[str, Any] = self.tokenizer._build_translation_inputs(
"A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="ar_AR" )
self.assertEqual(
nested_simplify(__UpperCamelCase ) , {
# en_XX, A, test, EOS
"input_ids": [[25_00_04, 62, 30_34, 2]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 25_00_01,
} , )
| 171
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=lowerCamelCase__ )
class SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ):
"""simple docstring"""
lowercase : str = field(default='text-classification' , metadata={'include_in_asdict_even_if_is_default': True} )
lowercase : ClassVar[Features] = Features({'text': Value('string' )} )
lowercase : ClassVar[Features] = Features({'labels': ClassLabel} )
lowercase : str = "text"
lowercase : str = "labels"
def __lowerCamelCase ( self , __UpperCamelCase ) -> List[str]:
'''simple docstring'''
if self.label_column not in features:
raise ValueError(f'''Column {self.label_column} is not present in features.''' )
        if not isinstance(features[self.label_column] , ClassLabel ):
raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
__UpperCamelCase : int = copy.deepcopy(self )
__UpperCamelCase : List[Any] = self.label_schema.copy()
__UpperCamelCase : Union[str, Any] = features[self.label_column]
__UpperCamelCase : Optional[Any] = label_schema
return task_template
@property
def __lowerCamelCase ( self ) -> Dict[str, str]:
'''simple docstring'''
return {
self.text_column: "text",
self.label_column: "labels",
}
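# A hedged usage sketch (feature names assumed; the method above corresponds to
# `align_with_features` in the upstream API):
#   features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
#   task = TextClassification().align_with_features(features)
#   # task.label_schema now carries the concrete ClassLabel taken from `features`.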
| 171
| 1
|
def a ( snake_case__: float , snake_case__: int ):
'''simple docstring'''
if digit_amount > 0:
return round(number - int(snake_case__ ) , snake_case__ )
return number - int(snake_case__ )
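# Hand-computed expectations for the calls below (floating-point representation
# may add trailing digits):
#   decimal_isolate(1.53, 0)    -> 0.53
#   decimal_isolate(35.345, 1)  -> 0.3
#   decimal_isolate(-14.789, 3) -> -0.789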
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
| 30
|
def a ( snake_case__: int = 100 ):
'''simple docstring'''
lowercase_ = (n * (n + 1) // 2) ** 2
lowercase_ = n * (n + 1) * (2 * n + 1) // 6
return sum_cubes - sum_squares
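# Worked example: for n = 10, sum_cubes = (10 * 11 // 2) ** 2 = 3_025 and
# sum_squares = 10 * 11 * 21 // 6 = 385, so the function returns 2_640.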
if __name__ == "__main__":
print(f"{solution() = }")
| 30
| 1
|
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowerCAmelCase__ = 16
lowerCAmelCase__ = 32
def snake_case_ ( A_ : Accelerator, A_ : int = 16 ):
'''simple docstring'''
_lowerCamelCase : Dict = AutoTokenizer.from_pretrained('''bert-base-cased''' )
_lowerCamelCase : int = load_dataset('''glue''', '''mrpc''' )
def tokenize_function(A_ : int ):
# max_length=None => use the model max length (it's actually the default)
_lowerCamelCase : Dict = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=A_, max_length=A_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
_lowerCamelCase : str = datasets.map(
A_, batched=A_, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_lowerCamelCase : Optional[Any] = tokenized_datasets.rename_column('''label''', '''labels''' )
def collate_fn(A_ : Optional[int] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
_lowerCamelCase : Dict = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
_lowerCamelCase : List[Any] = 16
elif accelerator.mixed_precision != "no":
_lowerCamelCase : Optional[Any] = 8
else:
_lowerCamelCase : Tuple = None
return tokenizer.pad(
A_, padding='''longest''', max_length=A_, pad_to_multiple_of=A_, return_tensors='''pt''', )
# Instantiate dataloaders.
_lowerCamelCase : Union[str, Any] = DataLoader(
tokenized_datasets['''train'''], shuffle=A_, collate_fn=A_, batch_size=A_ )
_lowerCamelCase : List[str] = DataLoader(
tokenized_datasets['''validation'''], shuffle=A_, collate_fn=A_, batch_size=A_ )
return train_dataloader, eval_dataloader
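# A hedged usage sketch (the helper above is the `get_dataloaders` function
# referenced later by the training entry point; the Accelerator here is assumed):
#   accelerator = Accelerator()
#   train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size=16)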
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
lowerCAmelCase__ = mocked_dataloaders # noqa: F811
def snake_case_ ( A_ : str, A_ : int ):
'''simple docstring'''
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', A_ ) == "1":
_lowerCamelCase : Optional[Any] = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
_lowerCamelCase : Optional[Any] = Accelerator(
cpu=args.cpu, mixed_precision=args.mixed_precision, log_with='''all''', project_dir=args.project_dir )
else:
_lowerCamelCase : Union[str, Any] = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_lowerCamelCase : Tuple = config['''lr''']
_lowerCamelCase : List[Any] = int(config['''num_epochs'''] )
_lowerCamelCase : Tuple = int(config['''seed'''] )
_lowerCamelCase : List[Any] = int(config['''batch_size'''] )
set_seed(A_ )
_lowerCamelCase , _lowerCamelCase : Tuple = get_dataloaders(A_, A_ )
_lowerCamelCase : Optional[int] = evaluate.load('''glue''', '''mrpc''' )
# If the batch size is too big we use gradient accumulation
_lowerCamelCase : int = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
_lowerCamelCase : List[Any] = batch_size // MAX_GPU_BATCH_SIZE
_lowerCamelCase : Union[str, Any] = MAX_GPU_BATCH_SIZE
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_lowerCamelCase : Optional[int] = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''', return_dict=A_ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_lowerCamelCase : Dict = model.to(accelerator.device )
# Instantiate optimizer
_lowerCamelCase : Optional[int] = AdamW(params=model.parameters(), lr=A_ )
# Instantiate scheduler
_lowerCamelCase : Optional[int] = get_linear_schedule_with_warmup(
optimizer=A_, num_warmup_steps=1_00, num_training_steps=(len(A_ ) * num_epochs) // gradient_accumulation_steps, )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : List[Any] = accelerator.prepare(
A_, A_, A_, A_, A_ )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
_lowerCamelCase : List[str] = os.path.split(A_ )[-1].split('''.''' )[0]
accelerator.init_trackers(A_, A_ )
# Now we train the model
for epoch in range(A_ ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
_lowerCamelCase : int = 0
for step, batch in enumerate(A_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
_lowerCamelCase : List[str] = model(**A_ )
_lowerCamelCase : Optional[int] = outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
_lowerCamelCase : Optional[Any] = loss / gradient_accumulation_steps
accelerator.backward(A_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(A_ ):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
_lowerCamelCase : Union[str, Any] = model(**A_ )
_lowerCamelCase : str = outputs.logits.argmax(dim=-1 )
_lowerCamelCase , _lowerCamelCase : int = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=A_, references=A_, )
_lowerCamelCase : Any = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''', A_ )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
'''accuracy''': eval_metric['''accuracy'''],
'''f1''': eval_metric['''f1'''],
'''train_loss''': total_loss.item() / len(A_ ),
'''epoch''': epoch,
}, step=A_, )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase : Any = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
        '''--mixed_precision''', type=A_, default=A_, choices=['''no''', '''fp16''', '''bf16''', '''fp8'''], help='''Whether to use mixed precision. Choose '''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '''
        '''and an Nvidia Ampere GPU.''', )
parser.add_argument('''--cpu''', action='''store_true''', help='''If passed, will train on the CPU.''' )
parser.add_argument(
'''--with_tracking''', action='''store_true''', help='''Whether to load in all available experiment trackers from the environment and use them for logging.''', )
parser.add_argument(
        '''--project_dir''', type=A_, default='''logs''', help='''Location on where to store experiment tracking logs and relevant project information''', )
_lowerCamelCase : Dict = parser.parse_args()
_lowerCamelCase : Optional[int] = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(A_, A_ )
if __name__ == "__main__":
main()
| 175
|
"""simple docstring"""
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
lowerCAmelCase__ = {
'''distilbert''': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'''roberta''': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'''bert''': (BertConfig, BertForMaskedLM, BertTokenizer),
'''gpt2''': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def snake_case_ ( A_ : Any ):
'''simple docstring'''
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def snake_case_ ( A_ : Dict, A_ : Any ):
'''simple docstring'''
if args.student_type == "roberta":
_lowerCamelCase : List[str] = False
elif args.student_type == "gpt2":
_lowerCamelCase : Any = False
def snake_case_ ( A_ : Optional[Any], A_ : List[Any] ):
'''simple docstring'''
if args.student_type == "roberta":
_lowerCamelCase : Optional[int] = False
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = argparse.ArgumentParser(description='''Training''' )
parser.add_argument('''--force''', action='''store_true''', help='''Overwrite dump_path if it already exists.''' )
parser.add_argument(
'''--dump_path''', type=A_, required=A_, help='''The output directory (log, checkpoints, parameters, etc.)''' )
parser.add_argument(
'''--data_file''', type=A_, required=A_, help='''The binarized file (tokenized + tokens_to_ids) and grouped by sequence.''', )
parser.add_argument(
'''--student_type''', type=A_, choices=['''distilbert''', '''roberta''', '''gpt2'''], required=A_, help='''The student type (DistilBERT, RoBERTa).''', )
parser.add_argument('''--student_config''', type=A_, required=A_, help='''Path to the student configuration.''' )
parser.add_argument(
'''--student_pretrained_weights''', default=A_, type=A_, help='''Load student initialization checkpoint.''' )
parser.add_argument(
'''--teacher_type''', choices=['''bert''', '''roberta''', '''gpt2'''], required=A_, help='''Teacher type (BERT, RoBERTa).''' )
parser.add_argument('''--teacher_name''', type=A_, required=A_, help='''The teacher model.''' )
parser.add_argument('''--temperature''', default=2.0, type=A_, help='''Temperature for the softmax temperature.''' )
parser.add_argument(
'''--alpha_ce''', default=0.5, type=A_, help='''Linear weight for the distillation loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_mlm''', default=0.0, type=A_, help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''', )
parser.add_argument('''--alpha_clm''', default=0.5, type=A_, help='''Linear weight for the CLM loss. Must be >=0.''' )
parser.add_argument('''--alpha_mse''', default=0.0, type=A_, help='''Linear weight of the MSE loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_cos''', default=0.0, type=A_, help='''Linear weight of the cosine embedding loss. Must be >=0.''' )
parser.add_argument(
'''--mlm''', action='''store_true''', help='''The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.''' )
parser.add_argument(
'''--mlm_mask_prop''', default=0.15, type=A_, help='''Proportion of tokens for which we need to make a prediction.''', )
parser.add_argument('''--word_mask''', default=0.8, type=A_, help='''Proportion of tokens to mask out.''' )
parser.add_argument('''--word_keep''', default=0.1, type=A_, help='''Proportion of tokens to keep.''' )
parser.add_argument('''--word_rand''', default=0.1, type=A_, help='''Proportion of tokens to randomly replace.''' )
parser.add_argument(
'''--mlm_smoothing''', default=0.7, type=A_, help='''Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).''', )
parser.add_argument('''--token_counts''', type=A_, help='''The token counts in the data_file for MLM.''' )
parser.add_argument(
'''--restrict_ce_to_mask''', action='''store_true''', help='''If true, compute the distillation loss only the [MLM] prediction distribution.''', )
parser.add_argument(
'''--freeze_pos_embs''', action='''store_true''', help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''', )
parser.add_argument(
'''--freeze_token_type_embds''', action='''store_true''', help='''Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.''', )
parser.add_argument('''--n_epoch''', type=A_, default=3, help='''Number of pass on the whole dataset.''' )
parser.add_argument('''--batch_size''', type=A_, default=5, help='''Batch size (for each process).''' )
parser.add_argument(
'''--group_by_size''', action='''store_false''', help='''If true, group sequences that have similar length into the same batch. Default is true.''', )
parser.add_argument(
'''--gradient_accumulation_steps''', type=A_, default=50, help='''Gradient accumulation for larger training batches.''', )
parser.add_argument('''--warmup_prop''', default=0.05, type=A_, help='''Linear warmup proportion.''' )
parser.add_argument('''--weight_decay''', default=0.0, type=A_, help='''Weight decay if we apply some.''' )
parser.add_argument('''--learning_rate''', default=5E-4, type=A_, help='''The initial learning rate for Adam.''' )
parser.add_argument('''--adam_epsilon''', default=1E-6, type=A_, help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''', default=5.0, type=A_, help='''Max gradient norm.''' )
parser.add_argument('''--initializer_range''', default=0.02, type=A_, help='''Random initialization range.''' )
parser.add_argument(
'''--fp16''', action='''store_true''', help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''', )
parser.add_argument(
'''--fp16_opt_level''', type=A_, default='''O1''', help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
), )
parser.add_argument('''--n_gpu''', type=A_, default=1, help='''Number of GPUs in the node.''' )
parser.add_argument('''--local_rank''', type=A_, default=-1, help='''Distributed training - Local rank''' )
parser.add_argument('''--seed''', type=A_, default=56, help='''Random seed''' )
parser.add_argument('''--log_interval''', type=A_, default=5_00, help='''Tensorboard logging interval.''' )
parser.add_argument('''--checkpoint_interval''', type=A_, default=40_00, help='''Checkpoint interval.''' )
_lowerCamelCase : List[Any] = parser.parse_args()
sanity_checks(A_ )
# ARGS #
init_gpu_params(A_ )
set_seed(A_ )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
                    F'''Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite'''
                    ''' it. Use `--force` if you want to overwrite it.''' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''' )
# SAVE PARAMS #
logger.info(F'''Param: {args}''' )
with open(os.path.join(args.dump_path, '''parameters.json''' ), '''w''' ) as f:
json.dump(vars(A_ ), A_, indent=4 )
git_log(args.dump_path )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Any = MODEL_CLASSES[args.student_type]
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Dict = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
_lowerCamelCase : Optional[int] = teacher_tokenizer_class.from_pretrained(args.teacher_name )
_lowerCamelCase : List[Any] = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
_lowerCamelCase : Optional[int] = tokenizer.all_special_tokens.index(A_ )
_lowerCamelCase : Union[str, Any] = tokenizer.all_special_ids[idx]
logger.info(F'''Special tokens {special_tok_ids}''' )
_lowerCamelCase : Optional[Any] = special_tok_ids
_lowerCamelCase : str = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F'''Loading data from {args.data_file}''' )
with open(args.data_file, '''rb''' ) as fp:
_lowerCamelCase : Any = pickle.load(A_ )
if args.mlm:
logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''' )
with open(args.token_counts, '''rb''' ) as fp:
_lowerCamelCase : str = pickle.load(A_ )
_lowerCamelCase : List[Any] = np.maximum(A_, 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
_lowerCamelCase : List[Any] = 0.0 # do not predict special tokens
_lowerCamelCase : str = torch.from_numpy(A_ )
else:
_lowerCamelCase : Optional[Any] = None
_lowerCamelCase : Any = LmSeqsDataset(params=A_, data=A_ )
logger.info('''Data loader created.''' )
# STUDENT #
logger.info(F'''Loading student config from {args.student_config}''' )
_lowerCamelCase : str = student_config_class.from_pretrained(args.student_config )
_lowerCamelCase : Union[str, Any] = True
if args.student_pretrained_weights is not None:
logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''' )
_lowerCamelCase : Dict = student_model_class.from_pretrained(args.student_pretrained_weights, config=A_ )
else:
_lowerCamelCase : Optional[Any] = student_model_class(A_ )
if args.n_gpu > 0:
student.to(F'''cuda:{args.local_rank}''' )
logger.info('''Student loaded.''' )
# TEACHER #
_lowerCamelCase : int = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=A_ )
if args.n_gpu > 0:
teacher.to(F'''cuda:{args.local_rank}''' )
logger.info(F'''Teacher loaded from {args.teacher_name}.''' )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(A_, A_ )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(A_, A_ )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
_lowerCamelCase : Optional[int] = Distiller(
params=A_, dataset=A_, token_probs=A_, student=A_, teacher=A_ )
distiller.train()
logger.info('''Let\'s go get some drinks.''' )
if __name__ == "__main__":
main()
| 175
| 1
|
from __future__ import annotations
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
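    # Added doctest examples (a minimal sketch exercised by the doctest.testmod()
    # call in the __main__ guard below; values follow V = I * R).
    """
    >>> ohms_law(voltage=10, current=5, resistance=0)
    {'resistance': 2.0}
    >>> ohms_law(voltage=0, current=2, resistance=3)
    {'voltage': 6.0}
    """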
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if resistance < 0:
raise ValueError("Resistance cannot be negative" )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 326
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class UpperCAmelCase__ :
__SCREAMING_SNAKE_CASE = PegasusConfig
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = '''gelu'''
def __init__( self , lowercase , lowercase=1_3 , lowercase=7 , lowercase=True , lowercase=False , lowercase=9_9 , lowercase=3_2 , lowercase=2 , lowercase=4 , lowercase=3_7 , lowercase=0.1 , lowercase=0.1 , lowercase=4_0 , lowercase=2 , lowercase=1 , lowercase=0 , ) -> Any:
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = seq_length
__UpperCamelCase = is_training
__UpperCamelCase = use_labels
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = eos_token_id
__UpperCamelCase = pad_token_id
__UpperCamelCase = bos_token_id
def __lowerCamelCase ( self ) -> Dict:
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
__UpperCamelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
__UpperCamelCase = tf.concat([input_ids, eos_tensor] , axis=1 )
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
__UpperCamelCase = prepare_pegasus_inputs_dict(lowercase , lowercase , lowercase )
return config, inputs_dict
def __lowerCamelCase ( self , lowercase , lowercase ) -> Union[str, Any]:
__UpperCamelCase = TFPegasusModel(config=lowercase ).get_decoder()
__UpperCamelCase = inputs_dict["""input_ids"""]
__UpperCamelCase = input_ids[:1, :]
__UpperCamelCase = inputs_dict["""attention_mask"""][:1, :]
__UpperCamelCase = inputs_dict["""head_mask"""]
__UpperCamelCase = 1
# first forward pass
__UpperCamelCase = model(lowercase , attention_mask=lowercase , head_mask=lowercase , use_cache=lowercase )
__UpperCamelCase , __UpperCamelCase = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
__UpperCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
__UpperCamelCase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
__UpperCamelCase = tf.concat([input_ids, next_tokens] , axis=-1 )
__UpperCamelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
__UpperCamelCase = model(lowercase , attention_mask=lowercase )[0]
__UpperCamelCase = model(lowercase , attention_mask=lowercase , past_key_values=lowercase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
__UpperCamelCase = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
__UpperCamelCase = output_from_no_past[:, -3:, random_slice_idx]
__UpperCamelCase = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowercase , lowercase , rtol=1E-3 )
def _lowercase ( __A ,__A ,__A ,__A=None ,__A=None ,__A=None ,__A=None ,__A=None ,):
'''simple docstring'''
if attention_mask is None:
__UpperCamelCase = tf.cast(tf.math.not_equal(__A ,config.pad_token_id ) ,tf.inta )
if decoder_attention_mask is None:
__UpperCamelCase = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape ,dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] ,config.pad_token_id ) ,tf.inta ),
] ,axis=-1 ,)
if head_mask is None:
__UpperCamelCase = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
__UpperCamelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
__UpperCamelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class UpperCAmelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase):
__SCREAMING_SNAKE_CASE = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
__SCREAMING_SNAKE_CASE = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
__SCREAMING_SNAKE_CASE = (
{
'''conversational''': TFPegasusForConditionalGeneration,
'''feature-extraction''': TFPegasusModel,
'''summarization''': TFPegasusForConditionalGeneration,
'''text2text-generation''': TFPegasusForConditionalGeneration,
'''translation''': TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def __lowerCamelCase ( self ) -> str:
__UpperCamelCase = TFPegasusModelTester(self )
__UpperCamelCase = ConfigTester(self , config_class=lowercase )
def __lowerCamelCase ( self ) -> str:
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ) -> Tuple:
__UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowercase )
@require_sentencepiece
@require_tokenizers
@require_tf
class UpperCAmelCase__ ( unittest.TestCase):
__SCREAMING_SNAKE_CASE = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
__SCREAMING_SNAKE_CASE = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
__SCREAMING_SNAKE_CASE = '''google/pegasus-xsum'''
@cached_property
def __lowerCamelCase ( self ) -> int:
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def __lowerCamelCase ( self ) -> str:
__UpperCamelCase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def __lowerCamelCase ( self , **lowercase ) -> Optional[int]:
__UpperCamelCase = self.translate_src_text(**lowercase )
assert self.expected_text == generated_words
def __lowerCamelCase ( self , **lowercase ) -> Optional[Any]:
__UpperCamelCase = self.tokenizer(self.src_text , **lowercase , padding=lowercase , return_tensors="""tf""" )
__UpperCamelCase = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=lowercase , )
__UpperCamelCase = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=lowercase )
return generated_words
@slow
def __lowerCamelCase ( self ) -> Dict:
self._assert_generated_batch_equal_expected()
| 349
| 0
|
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class _lowercase :
def __init__( self , UpperCAmelCase_ , UpperCAmelCase_=13 , UpperCAmelCase_=30 , UpperCAmelCase_=2 , UpperCAmelCase_=3 , UpperCAmelCase_=True , UpperCAmelCase_=True , UpperCAmelCase_=32 , UpperCAmelCase_=2 , UpperCAmelCase_=4 , UpperCAmelCase_=37 , UpperCAmelCase_="gelu" , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.1 , UpperCAmelCase_=10 , UpperCAmelCase_=0.02 , UpperCAmelCase_=3 , UpperCAmelCase_=None , UpperCAmelCase_=2 , ) -> Union[str, Any]:
lowerCamelCase : Tuple = parent
lowerCamelCase : int = batch_size
lowerCamelCase : Any = image_size
lowerCamelCase : List[Any] = patch_size
lowerCamelCase : Any = num_channels
lowerCamelCase : List[Any] = is_training
lowerCamelCase : List[str] = use_labels
lowerCamelCase : Union[str, Any] = hidden_size
lowerCamelCase : List[Any] = num_hidden_layers
lowerCamelCase : Dict = num_attention_heads
lowerCamelCase : Dict = intermediate_size
lowerCamelCase : Optional[Any] = hidden_act
lowerCamelCase : Tuple = hidden_dropout_prob
lowerCamelCase : List[Any] = attention_probs_dropout_prob
lowerCamelCase : List[str] = type_sequence_label_size
lowerCamelCase : List[Any] = initializer_range
lowerCamelCase : Dict = scope
lowerCamelCase : Any = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
lowerCamelCase : Union[str, Any] = (image_size // patch_size) ** 2
lowerCamelCase : Dict = num_patches + 2
def _UpperCamelCase ( self ) -> List[str]:
lowerCamelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase : str = None
if self.use_labels:
lowerCamelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def _UpperCamelCase ( self ) -> Optional[Any]:
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def _UpperCamelCase ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) -> Any:
lowerCamelCase : Optional[Any] = TFDeiTModel(config=UpperCAmelCase_ )
lowerCamelCase : Union[str, Any] = model(UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) -> List[Any]:
lowerCamelCase : List[Any] = TFDeiTForMaskedImageModeling(config=UpperCAmelCase_ )
lowerCamelCase : Union[str, Any] = model(UpperCAmelCase_ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowerCamelCase : Union[str, Any] = 1
lowerCamelCase : str = TFDeiTForMaskedImageModeling(UpperCAmelCase_ )
lowerCamelCase : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase : int = model(UpperCAmelCase_ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def _UpperCamelCase ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) -> Optional[int]:
lowerCamelCase : Optional[Any] = self.type_sequence_label_size
lowerCamelCase : Tuple = TFDeiTForImageClassification(UpperCAmelCase_ )
lowerCamelCase : Optional[Any] = model(UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCamelCase : List[str] = 1
lowerCamelCase : List[str] = TFDeiTForImageClassification(UpperCAmelCase_ )
lowerCamelCase : Optional[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase : Union[str, Any] = model(UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _UpperCamelCase ( self ) -> List[Any]:
lowerCamelCase : Optional[int] = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[int] = config_and_inputs
lowerCamelCase : int = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class _lowercase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
lowercase_ = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
lowercase_ = (
{
'feature-extraction': TFDeiTModel,
'image-classification': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
def _UpperCamelCase ( self ) -> Optional[Any]:
lowerCamelCase : Dict = TFDeiTModelTester(self )
lowerCamelCase : Optional[int] = ConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_ , hidden_size=37 )
def _UpperCamelCase ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
@unittest.skip(reason='DeiT does not use inputs_embeds' )
def _UpperCamelCase ( self ) -> Tuple:
pass
def _UpperCamelCase ( self ) -> int:
lowerCamelCase , lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : Any = model_class(UpperCAmelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
lowerCamelCase : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase_ , tf.keras.layers.Dense ) )
def _UpperCamelCase ( self ) -> int:
lowerCamelCase , lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : Optional[int] = model_class(UpperCAmelCase_ )
lowerCamelCase : Optional[int] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase : Dict = [*signature.parameters.keys()]
lowerCamelCase : List[str] = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase_ )
def _UpperCamelCase ( self ) -> Union[str, Any]:
lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def _UpperCamelCase ( self ) -> List[str]:
lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCAmelCase_ )
def _UpperCamelCase ( self ) -> Dict:
lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase_ )
def _UpperCamelCase ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=False ) -> List[Any]:
lowerCamelCase : Union[str, Any] = super()._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ , return_labels=UpperCAmelCase_ )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def _UpperCamelCase ( self ) -> List[Any]:
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : Dict = TFDeiTModel.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
def UpperCAmelCase ( ):
'''simple docstring'''
lowerCamelCase : Union[str, Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class _lowercase ( unittest.TestCase ):
@cached_property
def _UpperCamelCase ( self ) -> Optional[Any]:
return (
DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224' )
if is_vision_available()
else None
)
@slow
def _UpperCamelCase ( self ) -> Optional[Any]:
lowerCamelCase : Any = TFDeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224' )
lowerCamelCase : List[str] = self.default_image_processor
lowerCamelCase : Dict = prepare_img()
lowerCamelCase : Any = image_processor(images=UpperCAmelCase_ , return_tensors='tf' )
# forward pass
lowerCamelCase : List[str] = model(**UpperCAmelCase_ )
# verify the logits
lowerCamelCase : Dict = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase_ )
lowerCamelCase : str = tf.constant([-1.0266, 0.1912, -1.2861] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , UpperCAmelCase_ , atol=1E-4 ) )
| 205
|
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
_A = pytest.mark.integration
@require_faiss
class _lowercase ( __UpperCAmelCase ):
def _UpperCamelCase ( self ) -> Union[str, Any]:
lowerCamelCase : Any = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(x) for x in np.arange(30 ).tolist()]} )
return dset
def _UpperCamelCase ( self ) -> List[Any]:
import faiss
lowerCamelCase : Dataset = self._create_dummy_dataset()
lowerCamelCase : Optional[int] = dset.map(
lambda UpperCAmelCase_ , UpperCAmelCase_ : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=UpperCAmelCase_ , keep_in_memory=UpperCAmelCase_ )
lowerCamelCase : Dict = dset.add_faiss_index('vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT )
lowerCamelCase , lowerCamelCase : List[str] = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
dset.drop_index('vecs' )
def _UpperCamelCase ( self ) -> Tuple:
import faiss
lowerCamelCase : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
lowerCamelCase , lowerCamelCase : str = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def _UpperCamelCase ( self ) -> int:
import faiss
lowerCamelCase : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=UpperCAmelCase_ ) as tmp_file:
dset.save_faiss_index('vecs' , tmp_file.name )
dset.load_faiss_index('vecs2' , tmp_file.name )
os.unlink(tmp_file.name )
lowerCamelCase , lowerCamelCase : List[str] = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def _UpperCamelCase ( self ) -> Any:
lowerCamelCase : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' )
dset.drop_index('vecs' )
self.assertRaises(UpperCAmelCase_ , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa ) ) )
def _UpperCamelCase ( self ) -> Union[str, Any]:
from elasticsearch import Elasticsearch
lowerCamelCase : Dataset = self._create_dummy_dataset()
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
lowerCamelCase : Tuple = {'acknowledged': True}
mocked_bulk.return_value([(True, None)] * 30 )
lowerCamelCase : int = {'hits': {'hits': [{'_score': 1, '_id': 29}]}}
lowerCamelCase : Optional[Any] = Elasticsearch()
dset.add_elasticsearch_index('filename' , es_client=UpperCAmelCase_ )
lowerCamelCase , lowerCamelCase : str = dset.get_nearest_examples('filename' , 'my_name-train_29' )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
@require_faiss
class _lowercase ( __UpperCAmelCase ):
def _UpperCamelCase ( self ) -> Union[str, Any]:
import faiss
lowerCamelCase : str = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
lowerCamelCase : Optional[int] = np.zeros(5 , dtype=np.floataa )
lowerCamelCase : List[str] = 1
lowerCamelCase , lowerCamelCase : int = index.search(UpperCAmelCase_ )
self.assertRaises(UpperCAmelCase_ , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
lowerCamelCase : Tuple = np.eye(5 , dtype=np.floataa )[::-1]
lowerCamelCase , lowerCamelCase : List[str] = index.search_batch(UpperCAmelCase_ )
self.assertRaises(UpperCAmelCase_ , index.search_batch , queries[0] )
lowerCamelCase : List[str] = [scores[0] for scores in total_scores]
lowerCamelCase : List[str] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(UpperCAmelCase_ ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , UpperCAmelCase_ )
def _UpperCamelCase ( self ) -> Dict:
import faiss
lowerCamelCase : List[Any] = FaissIndex(string_factory='Flat' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
lowerCamelCase : int = FaissIndex(string_factory='LSH' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(UpperCAmelCase_ ):
lowerCamelCase : str = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5 ) )
def _UpperCamelCase ( self ) -> Any:
import faiss
lowerCamelCase : Any = faiss.IndexFlat(5 )
lowerCamelCase : Any = FaissIndex(custom_index=UpperCAmelCase_ )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def _UpperCamelCase ( self ) -> Any:
import faiss
lowerCamelCase : Any = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=UpperCAmelCase_ ) as tmp_file:
index.save(tmp_file.name )
lowerCamelCase : List[str] = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
lowerCamelCase : Dict = np.zeros(5 , dtype=np.floataa )
lowerCamelCase : Optional[Any] = 1
lowerCamelCase , lowerCamelCase : str = index.search(UpperCAmelCase_ )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def test_serialization_fs(mockfs):
    '''simple docstring'''
    import faiss
    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))
    index_name = 'index.faiss'
    path = f"mock://{index_name}"
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)
    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
assert scores[0] > 0
assert indices[0] == 1
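# Hedged note: the mock:// URL exercises the fsspec storage_options code path;
# the mockfs fixture is assumed to be provided by the test suite's conftest.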
@require_elasticsearch
class _lowercase ( __UpperCAmelCase ):
def _UpperCamelCase ( self ) -> int:
from elasticsearch import Elasticsearch
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
lowerCamelCase : Union[str, Any] = Elasticsearch()
lowerCamelCase : Optional[Any] = {'acknowledged': True}
lowerCamelCase : str = ElasticSearchIndex(es_client=UpperCAmelCase_ )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(['foo', 'bar', 'foobar'] )
# single query
lowerCamelCase : Tuple = 'foo'
lowerCamelCase : List[str] = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
lowerCamelCase , lowerCamelCase : Any = index.search(UpperCAmelCase_ )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
lowerCamelCase : Dict = 'foo'
lowerCamelCase : Optional[Any] = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
lowerCamelCase , lowerCamelCase : Optional[Any] = index.search(UpperCAmelCase_ , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
lowerCamelCase : str = ['foo', 'bar', 'foobar']
lowerCamelCase : Union[str, Any] = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
lowerCamelCase , lowerCamelCase : Optional[int] = index.search_batch(UpperCAmelCase_ )
lowerCamelCase : Dict = [scores[0] for scores in total_scores]
lowerCamelCase : Optional[int] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(UpperCAmelCase_ ) , 0 )
self.assertListEqual([1, 1, 1] , UpperCAmelCase_ )
# batched queries with timeout
lowerCamelCase : List[str] = ['foo', 'bar', 'foobar']
lowerCamelCase : str = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
lowerCamelCase , lowerCamelCase : Dict = index.search_batch(UpperCAmelCase_ , request_timeout=30 )
lowerCamelCase : Dict = [scores[0] for scores in total_scores]
lowerCamelCase : int = [indices[0] for indices in total_indices]
self.assertGreater(np.min(UpperCAmelCase_ ) , 0 )
self.assertListEqual([1, 1, 1] , UpperCAmelCase_ )
| 205
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
"processing_mgp_str": ["MgpstrProcessor"],
"tokenization_mgp_str": ["MgpstrTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_mgp_str"] = [
"MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
"MgpstrModel",
"MgpstrPreTrainedModel",
"MgpstrForSceneTextRecognition",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 66
|
'''simple docstring'''
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class A_ :
'''simple docstring'''
UpperCAmelCase_ : Optional[Union[str, Path]] = None
UpperCAmelCase_ : bool = False
UpperCAmelCase_ : bool = False
UpperCAmelCase_ : bool = False
UpperCAmelCase_ : Optional[Dict] = None
UpperCAmelCase_ : Optional[str] = None
UpperCAmelCase_ : bool = False
UpperCAmelCase_ : bool = False
UpperCAmelCase_ : bool = False
UpperCAmelCase_ : bool = True
UpperCAmelCase_ : Optional[int] = None
UpperCAmelCase_ : int = 1
UpperCAmelCase_ : Optional[Union[str, bool]] = None
UpperCAmelCase_ : bool = False
UpperCAmelCase_ : Optional[Dict] = None
UpperCAmelCase_ : Optional[str] = None
    def copy(self) -> "DownloadConfig":
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
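# Hedged note: copy() deep-copies every field value, so mutating a mutable
# field (such as a dict of options) on the clone leaves the original intact.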
| 151
| 0
|
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2

LABEL_DIR = ""
IMAGE_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)
def main() -> None:
    '''simple docstring'''
    img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)
    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"/{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index+1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"/{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str):
    '''simple docstring'''
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1):
    '''simple docstring'''
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list
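# Hedged note on the annotation format (assumed YOLO-style): each box above is
# [class_id, x_center, y_center, width, height] with coordinates normalized to
# [0, 1], which is why flipping maps a center coordinate c to 1 - c.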
def random_chars(number_char: int = 32) -> str:
    '''simple docstring'''
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print("DONE ✅")
| 259
|
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    '''simple docstring'''
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
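# A hedged usage sketch (the script file name is hypothetical; fire maps the
# function's parameters to CLI arguments and flags):
#   python fp16_convert.py pytorch_model.bin --save_path model_fp16.bin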
if __name__ == "__main__":
fire.Fire(convert)
| 259
| 1
|
"""simple docstring"""
import json
import sys
def format_json_to_md(input_json_file, output_md_file) -> None:
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)
    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]
    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]
        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")
        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)
            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"
            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"
        output_md += [title, lines, value, " "]
    output_md.append("</details>")
    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))
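# Hedged example of the expected input shape (file and metric names are
# hypothetical): {"benchmarks/bench_array.json": {"read_time": {"new": 0.12,
# "old": 0.15, "diff": -0.03}}} renders one "### Benchmark:" section with a
# new / old (diff) column per metric.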
if __name__ == "__main__":
_A = sys.argv[1]
_A = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
| 171
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
"""config""": [
"""EXTERNAL_DATA_FORMAT_SIZE_LIMIT""",
"""OnnxConfig""",
"""OnnxConfigWithPast""",
"""OnnxSeq2SeqConfigWithPast""",
"""PatchingSpec""",
],
"""convert""": ["""export""", """validate_model_outputs"""],
"""features""": ["""FeaturesManager"""],
"""utils""": ["""ParameterFormat""", """compute_serialized_parameters_size"""],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 171
| 1
|
"""simple docstring"""
def and_gate(input_a: int, input_b: int) -> int:
'''simple docstring'''
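    # The tuple counts zero-valued inputs: AND outputs 1 exactly when no input is 0.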
    return int((input_a, input_b).count(0) == 0)
def test_and_gate() -> None:
'''simple docstring'''
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and_gate(1 , 0 ) == 0
assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 358
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class __a :
"""simple docstring"""
def __init__( self : Dict , lowercase_ : Optional[Any] , lowercase_ : Any=13 , lowercase_ : Union[str, Any]=7 , lowercase_ : int=True , lowercase_ : List[str]=True , lowercase_ : int=True , lowercase_ : Tuple=True , lowercase_ : Union[str, Any]=99 , lowercase_ : int=32 , lowercase_ : List[Any]=2 , lowercase_ : Optional[int]=4 , lowercase_ : Dict=37 , lowercase_ : Union[str, Any]="gelu" , lowercase_ : int=0.1 , lowercase_ : Union[str, Any]=0.1 , lowercase_ : Optional[int]=512 , lowercase_ : Dict=16 , lowercase_ : Optional[int]=2 , lowercase_ : Optional[int]=0.0_2 , lowercase_ : Dict=3 , lowercase_ : Optional[int]=4 , lowercase_ : Any=None , ):
UpperCamelCase__ : Any =parent
UpperCamelCase__ : Any =13
UpperCamelCase__ : int =7
UpperCamelCase__ : Tuple =True
UpperCamelCase__ : Dict =True
UpperCamelCase__ : int =True
UpperCamelCase__ : Tuple =True
UpperCamelCase__ : Any =99
UpperCamelCase__ : Any =32
UpperCamelCase__ : Union[str, Any] =2
UpperCamelCase__ : List[Any] =4
UpperCamelCase__ : Any =37
UpperCamelCase__ : Union[str, Any] ='''gelu'''
UpperCamelCase__ : Dict =0.1
UpperCamelCase__ : int =0.1
UpperCamelCase__ : Union[str, Any] =512
UpperCamelCase__ : Dict =16
UpperCamelCase__ : List[Any] =2
UpperCamelCase__ : str =0.0_2
UpperCamelCase__ : Optional[Any] =3
UpperCamelCase__ : List[str] =4
UpperCamelCase__ : Optional[int] =None
def _lowerCAmelCase ( self : List[Any] ):
UpperCamelCase__ : str =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ : Any =None
if self.use_input_mask:
UpperCamelCase__ : List[Any] =random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase__ : List[Any] =None
if self.use_token_type_ids:
UpperCamelCase__ : int =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase__ : str =None
UpperCamelCase__ : Union[str, Any] =None
UpperCamelCase__ : str =None
if self.use_labels:
UpperCamelCase__ : Optional[int] =ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase__ : Union[str, Any] =ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase__ : int =RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=lowercase_ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCAmelCase ( self : Any , lowercase_ : List[Any] , lowercase_ : List[str] , lowercase_ : List[str] , lowercase_ : Tuple , lowercase_ : List[str] , lowercase_ : Dict , lowercase_ : int ):
UpperCamelCase__ : str =TFRoFormerModel(config=lowercase_ )
UpperCamelCase__ : List[Any] ={'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
UpperCamelCase__ : Dict =[input_ids, input_mask]
UpperCamelCase__ : Tuple =model(lowercase_ )
UpperCamelCase__ : str =model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self : List[Any] , lowercase_ : List[str] , lowercase_ : Dict , lowercase_ : List[str] , lowercase_ : Tuple , lowercase_ : List[str] , lowercase_ : Optional[Any] , lowercase_ : int ):
UpperCamelCase__ : Optional[Any] =True
UpperCamelCase__ : List[Any] =TFRoFormerForCausalLM(config=lowercase_ )
UpperCamelCase__ : Optional[Any] ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
UpperCamelCase__ : Any =model(lowercase_ )['''logits''']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def _lowerCAmelCase ( self : Any , lowercase_ : Union[str, Any] , lowercase_ : str , lowercase_ : List[Any] , lowercase_ : Optional[int] , lowercase_ : Any , lowercase_ : Optional[int] , lowercase_ : List[Any] ):
UpperCamelCase__ : str =TFRoFormerForMaskedLM(config=lowercase_ )
UpperCamelCase__ : int ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
UpperCamelCase__ : Optional[int] =model(lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCAmelCase ( self : List[str] , lowercase_ : Optional[Any] , lowercase_ : Tuple , lowercase_ : Union[str, Any] , lowercase_ : str , lowercase_ : Optional[Any] , lowercase_ : Dict , lowercase_ : int ):
UpperCamelCase__ : Tuple =self.num_labels
UpperCamelCase__ : List[str] =TFRoFormerForSequenceClassification(config=lowercase_ )
UpperCamelCase__ : Optional[int] ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
UpperCamelCase__ : Optional[Any] =model(lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCAmelCase ( self : List[Any] , lowercase_ : List[str] , lowercase_ : int , lowercase_ : Optional[Any] , lowercase_ : Dict , lowercase_ : Union[str, Any] , lowercase_ : int , lowercase_ : List[str] ):
UpperCamelCase__ : Tuple =self.num_choices
UpperCamelCase__ : Tuple =TFRoFormerForMultipleChoice(config=lowercase_ )
UpperCamelCase__ : Optional[int] =tf.tile(tf.expand_dims(lowercase_ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__ : int =tf.tile(tf.expand_dims(lowercase_ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__ : List[str] =tf.tile(tf.expand_dims(lowercase_ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__ : int ={
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
UpperCamelCase__ : Tuple =model(lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowerCAmelCase ( self : Dict , lowercase_ : int , lowercase_ : List[str] , lowercase_ : Union[str, Any] , lowercase_ : Tuple , lowercase_ : Optional[int] , lowercase_ : Optional[Any] , lowercase_ : Tuple ):
UpperCamelCase__ : Optional[int] =self.num_labels
UpperCamelCase__ : List[str] =TFRoFormerForTokenClassification(config=lowercase_ )
UpperCamelCase__ : List[str] ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
UpperCamelCase__ : int =model(lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCAmelCase ( self : str , lowercase_ : List[Any] , lowercase_ : List[Any] , lowercase_ : Dict , lowercase_ : Tuple , lowercase_ : Optional[Any] , lowercase_ : Any , lowercase_ : str ):
UpperCamelCase__ : Dict =TFRoFormerForQuestionAnswering(config=lowercase_ )
UpperCamelCase__ : Optional[Any] ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
UpperCamelCase__ : List[str] =model(lowercase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCAmelCase ( self : Optional[int] ):
UpperCamelCase__ : List[str] =self.prepare_config_and_inputs()
        UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : Tuple =config_and_inputs
UpperCamelCase__ : Any ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class __a ( snake_case__, snake_case__, unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
SCREAMING_SNAKE_CASE_ = (
{
'feature-extraction': TFRoFormerModel,
'fill-mask': TFRoFormerForMaskedLM,
'question-answering': TFRoFormerForQuestionAnswering,
'text-classification': TFRoFormerForSequenceClassification,
'text-generation': TFRoFormerForCausalLM,
'token-classification': TFRoFormerForTokenClassification,
'zero-shot': TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
def _lowerCAmelCase ( self : Optional[int] , lowercase_ : List[Any] , lowercase_ : Dict , lowercase_ : int , lowercase_ : Tuple , lowercase_ : int ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def _lowerCAmelCase ( self : List[Any] ):
UpperCamelCase__ : List[Any] =TFRoFormerModelTester(self )
UpperCamelCase__ : Any =ConfigTester(self , config_class=lowercase_ , hidden_size=37 )
def _lowerCAmelCase ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self : int ):
UpperCamelCase__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def _lowerCAmelCase ( self : Optional[Any] ):
UpperCamelCase__ : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowercase_ )
def _lowerCAmelCase ( self : Optional[int] ):
UpperCamelCase__ : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*lowercase_ )
def _lowerCAmelCase ( self : List[Any] ):
UpperCamelCase__ : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowercase_ )
def _lowerCAmelCase ( self : str ):
UpperCamelCase__ : Optional[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowercase_ )
def _lowerCAmelCase ( self : Optional[Any] ):
UpperCamelCase__ : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowercase_ )
def _lowerCAmelCase ( self : List[Any] ):
UpperCamelCase__ : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase_ )
@slow
def _lowerCAmelCase ( self : str ):
UpperCamelCase__ : Optional[Any] =TFRoFormerModel.from_pretrained('''junnyu/roformer_chinese_base''' )
self.assertIsNotNone(lowercase_ )
@require_tf
class __a ( unittest.TestCase ):
"""simple docstring"""
@slow
def _lowerCAmelCase ( self : List[str] ):
UpperCamelCase__ : List[str] =TFRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' )
UpperCamelCase__ : List[Any] =tf.constant([[0, 1, 2, 3, 4, 5]] )
UpperCamelCase__ : Any =model(lowercase_ )[0]
# TODO Replace vocab size
UpperCamelCase__ : Union[str, Any] =5_0000
UpperCamelCase__ : Optional[Any] =[1, 6, vocab_size]
self.assertEqual(output.shape , lowercase_ )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
UpperCamelCase__ : Optional[Any] =tf.constant(
[
[
[-0.1_2_0_5_3_3_4_1, -1.0_2_6_4_9_0_1, 0.2_9_2_2_1_9_4_6],
[-1.5_1_3_3_7_8_3, 0.1_9_7_4_3_3, 0.1_5_1_9_0_6_0_7],
[-5.0_1_3_5_4_0_3, -3.9_0_0_2_5_6, -0.8_4_0_3_8_7_6_4],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , lowercase_ , atol=1e-4 )
@require_tf
class __a ( unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 1e-4
def _lowerCAmelCase ( self : Any ):
UpperCamelCase__ : str =tf.constant([[4, 10]] )
UpperCamelCase__ : Dict =TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
UpperCamelCase__ : Any =emba(input_ids.shape )
UpperCamelCase__ : Union[str, Any] =tf.constant(
[[0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 1.0_0_0_0, 1.0_0_0_0, 1.0_0_0_0], [0.8_4_1_5, 0.0_4_6_4, 0.0_0_2_2, 0.5_4_0_3, 0.9_9_8_9, 1.0_0_0_0]] )
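        # Hedged check of the constants above: rows are [sin(pos/10000^(2i/d)) ..., cos(...)];
        # position 0 gives [0, 0, 0, 1, 1, 1] and position 1 gives sin(1)=0.8415 ... cos(1)=0.5403.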
tf.debugging.assert_near(lowercase_ , lowercase_ , atol=self.tolerance )
def _lowerCAmelCase ( self : List[str] ):
UpperCamelCase__ : Dict =tf.constant(
[
[0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0],
[0.8_4_1_5, 0.8_2_1_9, 0.8_0_2_0, 0.7_8_1_9, 0.7_6_1_7],
[0.9_0_9_3, 0.9_3_6_4, 0.9_5_8_1, 0.9_7_4_9, 0.9_8_7_0],
] )
UpperCamelCase__ : int =TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
emba([2, 16, 512] )
UpperCamelCase__ : Optional[int] =emba.weight[:3, :5]
tf.debugging.assert_near(lowercase_ , lowercase_ , atol=self.tolerance )
@require_tf
class __a ( unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 1e-4
def _lowerCAmelCase ( self : str ):
# 2,12,16,64
UpperCamelCase__ : Optional[int] =tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
UpperCamelCase__ : Optional[int] =-tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
UpperCamelCase__ : Optional[Any] =TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
UpperCamelCase__ : Union[str, Any] =embed_positions([2, 16, 768] )[None, None, :, :]
UpperCamelCase__ , UpperCamelCase__ : Optional[int] =TFRoFormerSelfAttention.apply_rotary_position_embeddings(
lowercase_ , lowercase_ , lowercase_ )
UpperCamelCase__ : Optional[int] =tf.constant(
[
[0.0_0_0_0, 0.0_1_0_0, 0.0_2_0_0, 0.0_3_0_0, 0.0_4_0_0, 0.0_5_0_0, 0.0_6_0_0, 0.0_7_0_0],
[-0.2_0_1_2, 0.8_8_9_7, 0.0_2_6_3, 0.9_4_0_1, 0.2_0_7_4, 0.9_4_6_3, 0.3_4_8_1, 0.9_3_4_3],
[-1.7_0_5_7, 0.6_2_7_1, -1.2_1_4_5, 1.3_8_9_7, -0.6_3_0_3, 1.7_6_4_7, -0.1_1_7_3, 1.8_9_8_5],
[-2.1_7_3_1, -1.6_3_9_7, -2.7_3_5_8, 0.2_8_5_4, -2.1_8_4_0, 1.7_1_8_3, -1.3_0_1_8, 2.4_8_7_1],
[0.2_7_1_7, -3.6_1_7_3, -2.9_2_0_6, -2.1_9_8_8, -3.6_6_3_8, 0.3_8_5_8, -2.9_1_5_5, 2.2_9_8_0],
[3.9_8_5_9, -2.1_5_8_0, -0.7_9_8_4, -4.4_9_0_4, -4.1_1_8_1, -2.0_2_5_2, -4.4_7_8_2, 1.1_2_5_3],
] )
UpperCamelCase__ : List[str] =tf.constant(
[
[0.0_0_0_0, -0.0_1_0_0, -0.0_2_0_0, -0.0_3_0_0, -0.0_4_0_0, -0.0_5_0_0, -0.0_6_0_0, -0.0_7_0_0],
[0.2_0_1_2, -0.8_8_9_7, -0.0_2_6_3, -0.9_4_0_1, -0.2_0_7_4, -0.9_4_6_3, -0.3_4_8_1, -0.9_3_4_3],
[1.7_0_5_7, -0.6_2_7_1, 1.2_1_4_5, -1.3_8_9_7, 0.6_3_0_3, -1.7_6_4_7, 0.1_1_7_3, -1.8_9_8_5],
[2.1_7_3_1, 1.6_3_9_7, 2.7_3_5_8, -0.2_8_5_4, 2.1_8_4_0, -1.7_1_8_3, 1.3_0_1_8, -2.4_8_7_1],
[-0.2_7_1_7, 3.6_1_7_3, 2.9_2_0_6, 2.1_9_8_8, 3.6_6_3_8, -0.3_8_5_8, 2.9_1_5_5, -2.2_9_8_0],
[-3.9_8_5_9, 2.1_5_8_0, 0.7_9_8_4, 4.4_9_0_4, 4.1_1_8_1, 2.0_2_5_2, 4.4_7_8_2, -1.1_2_5_3],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , lowercase_ , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , lowercase_ , atol=self.tolerance )
| 157
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_bert': ['BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BertConfig', 'BertOnnxConfig'],
'tokenization_bert': ['BasicTokenizer', 'BertTokenizer', 'WordpieceTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_bert_fast'] = ['BertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_bert'] = [
'BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BertForMaskedLM',
'BertForMultipleChoice',
'BertForNextSentencePrediction',
'BertForPreTraining',
'BertForQuestionAnswering',
'BertForSequenceClassification',
'BertForTokenClassification',
'BertLayer',
'BertLMHeadModel',
'BertModel',
'BertPreTrainedModel',
'load_tf_weights_in_bert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_bert'] = [
'TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBertEmbeddings',
'TFBertForMaskedLM',
'TFBertForMultipleChoice',
'TFBertForNextSentencePrediction',
'TFBertForPreTraining',
'TFBertForQuestionAnswering',
'TFBertForSequenceClassification',
'TFBertForTokenClassification',
'TFBertLMHeadModel',
'TFBertMainLayer',
'TFBertModel',
'TFBertPreTrainedModel',
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_bert_tf'] = ['TFBertTokenizer']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_bert'] = [
'FlaxBertForCausalLM',
'FlaxBertForMaskedLM',
'FlaxBertForMultipleChoice',
'FlaxBertForNextSentencePrediction',
'FlaxBertForPreTraining',
'FlaxBertForQuestionAnswering',
'FlaxBertForSequenceClassification',
'FlaxBertForTokenClassification',
'FlaxBertModel',
'FlaxBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
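# Descriptive note (not part of the original file): at import time only the
# cheap _import_structure dict above is built; each submodule is imported
# lazily, on first attribute access, by the _LazyModule placed into sys.modules.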
from __future__ import annotations
def ceil_index(v, l, r, key):  # noqa: E741
    # Binary search for the leftmost index in v[l..r] whose value is >= key.
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r
def longest_increasing_subsequence_length(v: list[int]) -> int:
    if len(v) == 0:
        return 0
    tail = [0] * len(v)
    length = 1
    tail[0] = v[0]
    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # New smallest value: it starts a fresh candidate subsequence.
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the longest subsequence found so far.
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces its ceiling element to keep the tails minimal.
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]
    return length
if __name__ == "__main__":
import doctest
doctest.testmod()
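    # Illustrative check (sample values assumed, not from the original file):
    # the longest strictly increasing subsequence of the list below is
    # [2, 3, 7, 8, 10, 13], so the reported length is 6.
    assert longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6]) == 6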
"""simple docstring"""
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1
    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
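    # Illustrative check (sample values assumed, not from the original file):
    # for [10, 22, 9, 33, 21, 50, 41, 60, 80] this routine returns the
    # non-decreasing subsequence [10, 22, 33, 41, 60, 80].
    assert longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80]) == [10, 22, 33, 41, 60, 80]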
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            'stabilityai/stable-diffusion-2', revision='bf16', dtype=jnp.bfloat16)
        prompt = 'A painting of a squirrel eating a burger'
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)
        params = replicate(params)
        prompt_ids = shard(prompt_ids)
        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
    def test_stable_diffusion_dpm_flax(self):
        model_id = 'stabilityai/stable-diffusion-2'
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder='scheduler')
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, revision='bf16', dtype=jnp.bfloat16)
        params['scheduler'] = scheduler_params
        prompt = 'A painting of a squirrel eating a burger'
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)
        params = replicate(params)
        prompt_ids = shard(prompt_ids)
        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
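
# Layout sketch for the data-parallel calls above (descriptive comment; an
# 8-device TPU/GPU host is assumed): replicate(params) copies the same weights
# to every device, shard(prompt_ids) splits the batch across devices, and
# jax.random.split(key, jax.device_count()) hands each device its own RNG key.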
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'CarlCochet/trajectory-transformer-halfcheetah-medium-v2': (
'https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json'
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class TrajectoryTransformerConfig(PretrainedConfig):
    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self, vocab_size=100, action_weight=5, reward_weight=1, value_weight=1,
        block_size=249, action_dim=6, observation_dim=17, transition_dim=25,
        n_layer=4, n_head=4, n_embd=128, embd_pdrop=0.1, attn_pdrop=0.1,
        resid_pdrop=0.1, learning_rate=0.0006, max_position_embeddings=512,
        initializer_range=0.02, layer_norm_eps=1e-12, kaiming_initializer_range=1,
        use_cache=True, pad_token_id=1, bos_token_id=50_256, eos_token_id=50_256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
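
# Usage sketch (assumed values, not from the original file): attribute_map
# above aliases generic names onto this config's fields, so for
#     config = TrajectoryTransformerConfig(n_embd=64, n_head=2)
# the lookup config.hidden_size resolves to config.n_embd, i.e. 64.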
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset('glue', 'mrpc')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'], load_from_cache_file=False)
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding='max_length', max_length=128, return_tensors='pt')
        return tokenizer.pad(examples, padding='longest', return_tensors='pt')

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)
    return train_dataloader, eval_dataloader
def evaluation_loop(accelerator, model, eval_dataloader, metric):
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch['labels']))  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(predictions=predictions, references=references)
    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def training_function(config, args):
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    model_name = args.model_name_or_path
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            'gradient_accumulation_steps'
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps)
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load('glue', 'mrpc')
    ending_epoch = num_epochs
    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch
    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        epoch_string = args.resume_from_checkpoint.split('epoch_')[1]
        state_epoch_num = ''
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print('resumed checkpoint performance:', accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizers's lr:", optimizer.param_groups[0]['lr'])
        with open(os.path.join(args.output_dir, f'state_{starting_epoch-1}.json'), 'r') as f:
            resumed_state = json.load(f)
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return
    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1
        output_dir = f'epoch_{epoch}'
        output_dir = os.path.join(args.output_dir, output_dir)
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state['accuracy'] = accuracy
        state['lr'] = lr_scheduler.get_lr()[0]
        state['optimizer_lr'] = optimizer.param_groups[0]['lr']
        state['epoch'] = epoch
        state['step'] = overall_step
        accelerator.print(f'epoch {epoch}:', state)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f'state_{epoch}.json'), 'w') as f:
                json.dump(state, f)
def main():
    parser = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.')
    parser.add_argument(
        '--model_name_or_path', type=str, default='bert-base-cased', help='Path to pretrained model or model identifier from huggingface.co/models.', required=False, )
    parser.add_argument(
        '--output_dir', type=str, default='.', help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.', )
    parser.add_argument(
        '--resume_from_checkpoint', type=str, default=None, help='If the training should continue from a checkpoint folder.', )
    parser.add_argument(
        '--partial_train_epoch', type=int, default=None, help='If passed, the training will stop after this number of epochs.', )
    parser.add_argument(
        '--num_epochs', type=int, default=2, help='Number of train epochs.', )
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
    training_function(config, args)
if __name__ == "__main__":
main()
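
# Launch sketch (assumed invocation, not from the original file): run the
# script under the Accelerate launcher so the distributed/DeepSpeed state
# queried above is initialized, e.g.
#     accelerate launch this_script.py --num_epochs 2 --output_dir ./ckpts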
import random
import unittest
import torch
from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'width', 'height'}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'original_image'})
    required_optional_params = PipelineTesterMixin.required_optional_params - {'latents'}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': image,
            'original_image': original_image,
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs
    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available(), reason='XFormers attention is only available with CUDA and `xformers` installed', )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != 'cuda', reason='float16 requires CUDA')
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
import enum
import shutil
import sys
TERMINAL_WIDTH, _ = shutil.get_terminal_size()
CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}


class Direction(enum.Enum):
    UP = 0
    DOWN = 1


def forceWrite(content, end=""):
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()


def writeColor(content, color, end=""):
    forceWrite(f'\u001b[{color}m{content}\u001b[0m', end)


def reset_cursor():
    forceWrite('\r')


def move_cursor(num_lines: int, direction: str):
    forceWrite(f'\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}')


def clear_line():
    forceWrite(' ' * TERMINAL_WIDTH)
    reset_cursor()


def linebreak():
    reset_cursor()
    forceWrite('-' * TERMINAL_WIDTH)
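
# Usage sketch (the ANSI SGR code 32 for green is an assumed example value):
#     writeColor("Done!", 32, end="\n")  # prints "Done!" in green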
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SeedResizeStableDiffusionPipeline(DiffusionPipeline):
    def __init__(self, vae, text_encoder, tokenizer, unet, scheduler, safety_checker, feature_extractor):
        super().__init__()
        self.register_modules(
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor)

    def enable_attention_slicing(self, slice_size="auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
@torch.no_grad()
    def __call__(
        self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5,
        negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None,
        latents=None, output_type="pil", return_dict=True, callback=None,
        callback_steps=1, text_embeddings=None, **kwargs,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}')
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.')
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f'`callback_steps` has to be a positive integer but is {callback_steps} of type'
                f' {type(callback_steps)}.')
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt, padding='max_length', max_length=self.tokenizer.model_max_length, return_tensors='pt')
        text_input_ids = text_inputs.input_ids
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                'The following part of your input was truncated because CLIP can only handle sequences up to'
                f' {self.tokenizer.model_max_length} tokens: {removed_text}')
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        if text_embeddings is None:
            text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = ['']
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !='
                    f' {type(prompt)}.')
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:'
                    f' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'
                    ' the batch size of `prompt`.')
            else:
                uncond_tokens = negative_prompt
            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt')
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(batch_size, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_shape_reference = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device='cpu', dtype=latents_dtype).to(self.device)
                latents = torch.randn(latents_shape, generator=generator, device='cpu', dtype=latents_dtype).to(
                    self.device)
            else:
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device=self.device, dtype=latents_dtype)
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents_reference.shape != latents_shape:
                raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {latents_shape}')
            latents_reference = latents_reference.to(self.device)
            latents = latents.to(self.device)
        # This is the key part of the pipeline where we
        # try to ensure that the generated images w/ the same seed
        # but different sizes actually result in similar images
        dx = (latents_shape[3] - latents_shape_reference[3]) // 2
        dy = (latents_shape[2] - latents_shape_reference[2]) // 2
        w = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
        h = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
        tx = 0 if dx < 0 else dx
        ty = 0 if dy < 0 else dy
        dx = max(-dx, 0)
        dy = max(-dy, 0)
        latents[:, :, ty : ty + h, tx : tx + w] = latents_reference[:, :, dy : dy + h, dx : dx + w]
        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs['eta'] = eta
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors='pt').to(
                self.device)
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype))
        else:
            has_nsfw_concept = None
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image, has_nsfw_concept)
        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
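
# Worked example of the seed-resize offsets above (descriptive comment with
# assumed sizes): for a 768x512 request the latents grid is 64x96 (h x w)
# while the reference grid is 64x64, so dx = (96 - 64) // 2 = 16 and dy = 0;
# the full 64x64 reference noise is pasted centered, starting at column 16 of
# the new latents, which is what keeps a given seed looking similar across
# output sizes.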
def triangle_number_generator():
    for n in range(1, 1000000):
        yield n * (n + 1) // 2


def count_divisors(n: int) -> int:
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count


def solution():
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)
if __name__ == "__main__":
print(solution())
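
# Worked example of the divisor count above (illustrative values): 28 = 2^2 * 7,
# so count_divisors(28) == (2 + 1) * (1 + 1) == 6; its divisors are
# 1, 2, 4, 7, 14, 28, and 28 is the first triangle number with more than 5 of them.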
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_deta_config(model_name):
    backbone_config = SwinConfig(
        embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), window_size=12, out_features=["stage2", "stage3", "stage4"], )
    config = DetaConfig(
        backbone_config=backbone_config, num_queries=900, encoder_ffn_dim=2_048, decoder_ffn_dim=2_048, num_feature_levels=5, assign_first_stage=True, with_box_refine=True, two_stage=True, )
    # set labels
    repo_id = "huggingface/label-files"
    if "o365" in model_name:
        num_labels = 366
        filename = "object365-id2label.json"
    else:
        num_labels = 91
        filename = "coco-detection-id2label.json"
    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("""backbone.0.body.patch_embed.proj.weight""", """model.backbone.model.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.0.body.patch_embed.proj.bias""", """model.backbone.model.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.0.body.patch_embed.norm.weight""", """model.backbone.model.embeddings.norm.weight""") )
rename_keys.append(("""backbone.0.body.patch_embed.norm.bias""", """model.backbone.model.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm1.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm1.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm2.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm2.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias") )
if i < 3:
rename_keys.append((f"backbone.0.body.layers.{i}.downsample.reduction.weight", f"model.backbone.model.encoder.layers.{i}.downsample.reduction.weight") )
rename_keys.append((f"backbone.0.body.layers.{i}.downsample.norm.weight", f"model.backbone.model.encoder.layers.{i}.downsample.norm.weight") )
rename_keys.append((f"backbone.0.body.layers.{i}.downsample.norm.bias", f"model.backbone.model.encoder.layers.{i}.downsample.norm.bias") )
rename_keys.append(("""backbone.0.body.norm1.weight""", """model.backbone.model.hidden_states_norms.stage2.weight""") )
rename_keys.append(("""backbone.0.body.norm1.bias""", """model.backbone.model.hidden_states_norms.stage2.bias""") )
rename_keys.append(("""backbone.0.body.norm2.weight""", """model.backbone.model.hidden_states_norms.stage3.weight""") )
rename_keys.append(("""backbone.0.body.norm2.bias""", """model.backbone.model.hidden_states_norms.stage3.bias""") )
rename_keys.append(("""backbone.0.body.norm3.weight""", """model.backbone.model.hidden_states_norms.stage4.weight""") )
rename_keys.append(("""backbone.0.body.norm3.bias""", """model.backbone.model.hidden_states_norms.stage4.bias""") )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight", f"model.encoder.layers.{i}.self_attn.sampling_offsets.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias", f"model.encoder.layers.{i}.self_attn.sampling_offsets.bias") )
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.attention_weights.weight", f"model.encoder.layers.{i}.self_attn.attention_weights.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.attention_weights.bias", f"model.encoder.layers.{i}.self_attn.attention_weights.bias") )
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.value_proj.weight", f"model.encoder.layers.{i}.self_attn.value_proj.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.value_proj.bias", f"model.encoder.layers.{i}.self_attn.value_proj.bias") )
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.output_proj.weight", f"model.encoder.layers.{i}.self_attn.output_proj.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.output_proj.bias", f"model.encoder.layers.{i}.self_attn.output_proj.bias") )
rename_keys.append((f"transformer.encoder.layers.{i}.norm1.weight", f"model.encoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"model.encoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"model.encoder.layers.{i}.fc1.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"model.encoder.layers.{i}.fc1.bias") )
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"model.encoder.layers.{i}.fc2.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"model.encoder.layers.{i}.fc2.bias") )
rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"model.encoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"model.encoder.layers.{i}.final_layer_norm.bias") )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight", f"model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias", f"model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.attention_weights.weight", f"model.decoder.layers.{i}.encoder_attn.attention_weights.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.attention_weights.bias", f"model.decoder.layers.{i}.encoder_attn.attention_weights.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.value_proj.weight", f"model.decoder.layers.{i}.encoder_attn.value_proj.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.value_proj.bias", f"model.decoder.layers.{i}.encoder_attn.value_proj.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.output_proj.weight", f"model.decoder.layers.{i}.encoder_attn.output_proj.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.output_proj.bias", f"model.decoder.layers.{i}.encoder_attn.output_proj.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.norm1.weight", f"model.decoder.layers.{i}.encoder_attn_layer_norm.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"model.decoder.layers.{i}.encoder_attn_layer_norm.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"model.decoder.layers.{i}.self_attn.out_proj.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"model.decoder.layers.{i}.self_attn.out_proj.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.norm2.weight", f"model.decoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.norm2.bias", f"model.decoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"model.decoder.layers.{i}.fc1.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"model.decoder.layers.{i}.fc1.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"model.decoder.layers.{i}.fc2.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"model.decoder.layers.{i}.fc2.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"model.decoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"model.decoder.layers.{i}.final_layer_norm.bias") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    config = get_deta_config(model_name)
    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename="adet_swin_ft.pth")
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365", filename="deta_swin_pt_o365.pth")
    else:
        raise ValueError(f"Model name {model_name} not supported")
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    # original state dict
    for name, param in state_dict.items():
        print(name, param.shape)
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)
    # fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer.decoder", "model.decoder")] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val
    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)
    # load image processor
    processor = DetaImageProcessor(format="coco_detection")
    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values.to(device))
    # verify logits
    print("Logits:", outputs.logits[0, :3, :3])
    print("Boxes:", outputs.pred_boxes[0, :3, :3])
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]])
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]])
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
    print("Everything ok!")
    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f"Saving PyTorch model and processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    # Push to hub
    if push_to_hub:
        print("Pushing model and processor to hub...")
        model.push_to_hub(f"jozhang97/{model_name}")
        processor.push_to_hub(f"jozhang97/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
type=str,
default='''deta-swin-large''',
choices=['''deta-swin-large''', '''deta-swin-large-o365'''],
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
help='''Path to the folder to output PyTorch model.''',
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
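# Invocation sketch (assumed command line and file name, matching the argparse
# flags above):
#     python convert_deta_swin_to_pytorch.py --model_name deta-swin-large \
#         --pytorch_dump_folder_path ./deta-swin-large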
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
    GPT2Tokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class InstructBlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
        qformer_tokenizer = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert")
        processor = InstructBlipProcessor(image_processor, tokenizer, qformer_tokenizer)
        processor.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def get_qformer_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).qformer_tokenizer
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_additional_features(self):
        processor = InstructBlipProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor(), qformer_tokenizer=self.get_qformer_tokenizer())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = InstructBlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
        self.assertIsInstance(processor.qformer_tokenizer, BertTokenizerFast)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)
        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tokens = tokenizer(input_str, return_token_type_ids=False)
        encoded_tokens_qformer = qformer_tokenizer(input_str, return_token_type_ids=False)
        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key], encoded_processor[key])
        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key], encoded_processor["qformer_" + key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(
            list(inputs.keys()), ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"], )
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(
            list(inputs.keys()), ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"], )
"""simple docstring"""
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )

    def __post_init__(self):
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                "--config_overrides can't be used in combination with --config_name or --model_name_or_path"
            )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word masking in Chinese."},
    )
    validation_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated. Default to the max input length of the model."
            )
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file):
    """Attach whole-word-masking reference ids (one JSON list per line in `ref_file`) to `dataset`."""
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)
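
# A minimal sketch of the ref-file format consumed above (an assumption based on the
# companion run_chinese_ref.py convention, not spelled out in this script): one JSON
# list per line, giving the positions of sub-word tokens that continue the previous
# word ("##"-style pieces), aligned with the examples in the dataset.
#
# refs.txt:
#   [2, 3]
#   [1, 4, 5]
#
# which would be attached as dataset_dict["chinese_ref"] = [[2, 3], [1, 4, 5]].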
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).
    #
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            datasets["validation"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[:{data_args.validation_split_percentage}%]",
            )
            datasets["train"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[{data_args.validation_split_percentage}%:]",
            )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        if extension == "txt":
            extension = "text"
        datasets = load_dataset(extension, data_files=data_files)
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    tokenizer_kwargs = {
        "cache_dir": model_args.cache_dir,
        "use_fast": model_args.use_fast_tokenizer,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script. "
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
        )

    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedLM.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    # Preprocessing the datasets.
    # First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets["train"].column_names
    else:
        column_names = datasets["validation"].column_names
    text_column_name = "text" if "text" in column_names else column_names[0]

    padding = "max_length" if data_args.pad_to_max_length else False

    def tokenize_function(examples):
        # Remove empty lines
        examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples["text"], padding=padding, truncation=True, max_length=data_args.max_seq_length)

    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        remove_columns=[text_column_name],
        load_from_cache_file=not data_args.overwrite_cache,
    )

    # Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets["train"] = add_chinese_references(tokenized_datasets["train"], data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        tokenized_datasets["validation"] = add_chinese_references(
            tokenized_datasets["validation"], data_args.validation_ref_file
        )
    # If we have ref files, need to avoid it removed by trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False

    # Data collator
    # This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"] if training_args.do_train else None,
        eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload

        output_train_file = os.path.join(training_args.output_dir, "train_results.txt")
        if trainer.is_world_process_zero():
            with open(output_train_file, "w") as writer:
                logger.info("***** Train results *****")
                for key, value in sorted(train_result.metrics.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        results["perplexity"] = perplexity

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in sorted(results.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
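
# A minimal sketch of what DataCollatorForWholeWordMask does with tokenized features
# (illustrative only; the tokenizer checkpoint is an assumption, and real runs would
# also carry the chinese_ref column attached above):
#
# from transformers import AutoTokenizer, DataCollatorForWholeWordMask
#
# tok = AutoTokenizer.from_pretrained("bert-base-chinese")
# collator = DataCollatorForWholeWordMask(tokenizer=tok, mlm_probability=0.15)
# features = [tok("北京是中国的首都")]
# batch = collator(features)
# # batch["input_ids"] has whole words replaced by [MASK]; batch["labels"] holds the
# # original ids at masked positions and -100 everywhere else.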
def prefix_function(input_string: str) -> list:
    """
    For each index i, computes the length of the longest proper prefix of
    input_string[0...i] that is also a suffix of it (the KMP failure function).
    """
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]

        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result


def longest_prefix(input_str: str) -> int:
    """Returns the length of the longest prefix that is also a suffix of a substring."""
    return max(prefix_function(input_str))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
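
# A worked example (a quick illustration, not part of the original module):
# for "aabcdaabc" the prefix function is [0, 1, 0, 0, 0, 1, 2, 3, 4]; the final 4
# records that the suffix "aabc" matches the prefix "aabc", so
# longest_prefix("aabcdaabc") returns 4.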
"""simple docstring"""
import math
import os
import sys
def read_file_binary(file_path: str) -> str:
    """
    Reads given file as bytes and returns them as a long string
    """
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def add_key_to_lexicon(lexicon: dict, curr_string: str, index: int, last_match_id: str) -> None:
    """
    Adds new strings (curr_string + "0", curr_string + "1") to the lexicon
    """
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id

    if math.log2(index).is_integer():
        # every stored id needs one more leading bit once the index crosses a power of two
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]

    lexicon[curr_string + "1"] = bin(index)[2:]


def compress_data(data_bits: str) -> str:
    """
    Compresses given data_bits using Lempel-Ziv-Welch compression algorithm
    and returns the result as a string
    """
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""

    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"

    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id

    return result


def add_file_length(source_path: str, compressed: str) -> str:
    """
    Adds given file's length in front (using Elias gamma coding) of the compressed string
    """
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)

    return "0" * (length_length - 1) + file_length_binary + compressed


def write_file_binary(file_path: str, to_write: str) -> None:
    """
    Writes given to_write string (should only consist of 0's and 1's) as bytes in the file
    """
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [to_write[i : i + byte_length] for i in range(0, len(to_write), byte_length)]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (byte_length - len(result_byte_array[-1]) - 1)

            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def compress(source_path: str, destination_path: str) -> None:
    """
    Reads source file, compresses it and writes the compressed result in destination file
    """
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)


if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
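
# A minimal usage sketch of the entry point above (the file names and the module name
# `lempel_ziv` are placeholders, not fixed by this script):
#
#   python lempel_ziv.py source.bin destination.lzw
#
# Programmatic use, assuming the module is importable under that name:
#
# import lempel_ziv
# lempel_ziv.compress("source.bin", "destination.lzw")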
"""simple docstring"""
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)


def string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )


def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    """
    Maps each choice's string representation back to the actual value, so a single
    argument can accept several value types.
    """
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)


def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help

    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
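
# A minimal sketch of HfArg in use (the dataclass below is hypothetical, shown only
# to illustrate the helper defined above):
#
# @dataclasses.dataclass
# class ExampleArgs:
#     lr: float = HfArg(default=1e-4, aliases=["--learning_rate"], help="Learning rate.")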
class HfArgumentParser(ArgumentParser):
    """
    This subclass of `argparse.ArgumentParser` uses type hints on dataclasses to generate arguments.
    """

    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)

    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(origin_type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the correct kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)

    def parse_args_into_dataclasses(
        self,
        args=None,
        return_remaining_strings=False,
        look_for_args_file=True,
        args_filename=None,
        args_file_flag=None,
    ) -> Tuple[DataClass, ...]:
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]

        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")

            return (*outputs,)

    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)

    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)

    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
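
# A minimal usage sketch of the parser above (the dataclass is hypothetical, for
# illustration only):
#
# @dataclasses.dataclass
# class TrainConfig:
#     lr: float = 1e-4
#     epochs: int = 3
#
# parser = HfArgumentParser(TrainConfig)
# (cfg,) = parser.parse_args_into_dataclasses(args=["--lr", "0.01", "--epochs", "5"])
# assert cfg.lr == 0.01 and cfg.epochs == 5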
"""simple docstring"""
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)


class SpeechT5FeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values", "attention_mask"]

    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 16000,
        padding_value: float = 0.0,
        do_normalize: bool = False,
        num_mel_bins: int = 80,
        hop_length: int = 16,
        win_length: int = 64,
        win_function: str = "hann_window",
        frame_signal_scale: float = 1.0,
        fmin: float = 80,
        fmax: float = 7600,
        mel_floor: float = 1e-10,
        reduction_factor: int = 2,
        return_attention_mask: bool = True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.do_normalize = do_normalize
        self.return_attention_mask = return_attention_mask

        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.frame_signal_scale = frame_signal_scale
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.reduction_factor = reduction_factor

        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

        self.window = window_function(window_length=self.sample_size, name=self.win_function, periodic=True)

        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.num_mel_bins,
            min_frequency=self.fmin,
            max_frequency=self.fmax,
            sampling_rate=self.sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

        if frame_signal_scale != 1.0:
            warnings.warn(
                "The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )
        if reduction_factor != 2.0:
            warnings.warn(
                "The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values

    def _extract_mel_features(self, one_waveform: np.ndarray) -> np.ndarray:
        # log-mel filterbank features for one (unbatched) waveform
        log_mel_spec = spectrogram(
            one_waveform,
            window=self.window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            mel_filters=self.mel_filters,
            mel_floor=self.mel_floor,
            log_mel="log10",
        )
        return log_mel_spec.T

    def __call__(
        self,
        audio: Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None,
        audio_target: Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None,
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        **kwargs,
    ) -> BatchFeature:
        if audio is None and audio_target is None:
            raise ValueError("You must provide either `audio` or `audio_target` values.")

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if audio is not None:
            inputs = self._process_audio(
                audio, False, padding, max_length, truncation, pad_to_multiple_of, return_attention_mask,
                return_tensors, **kwargs,
            )
        else:
            inputs = None

        if audio_target is not None:
            inputs_target = self._process_audio(
                audio_target, True, padding, max_length, truncation, pad_to_multiple_of, return_attention_mask,
                return_tensors, **kwargs,
            )

            if inputs is None:
                return inputs_target
            else:
                inputs["labels"] = inputs_target["input_values"]
                decoder_attention_mask = inputs_target.get("attention_mask")
                if decoder_attention_mask is not None:
                    inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def _process_audio(
        self,
        speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        is_target: bool = False,
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        is_batched_numpy = isinstance(speech, np.ndarray) and len(speech.shape) > 1
        if is_batched_numpy and len(speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(speech, (list, tuple)) and (isinstance(speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            speech = [np.asarray(s, dtype=np.float32) for s in speech]
        elif not is_batched and not isinstance(speech, np.ndarray):
            speech = np.asarray(speech, dtype=np.float32)
        elif isinstance(speech, np.ndarray) and speech.dtype is np.dtype(np.float64):
            speech = speech.astype(np.float32)

        # always return batch
        if not is_batched:
            speech = [speech]

        # needed to make pad() work on spectrogram inputs
        feature_size_hack = self.feature_size

        # convert into correct format for padding
        if is_target:
            features = [self._extract_mel_features(waveform) for waveform in speech]
            encoded_inputs = BatchFeature({"input_values": features})
            self.feature_size = self.num_mel_bins
        else:
            encoded_inputs = BatchFeature({"input_values": speech})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        self.feature_size = feature_size_hack

        # convert input values to correct format
        input_values = padded_inputs["input_values"]
        if not isinstance(input_values[0], np.ndarray):
            padded_inputs["input_values"] = [np.asarray(array, dtype=np.float32) for array in input_values]
        elif (
            not isinstance(input_values, np.ndarray)
            and isinstance(input_values[0], np.ndarray)
            and input_values[0].dtype is np.dtype(np.float64)
        ):
            padded_inputs["input_values"] = [array.astype(np.float32) for array in input_values]
        elif isinstance(input_values, np.ndarray) and input_values.dtype is np.dtype(np.float64):
            padded_inputs["input_values"] = input_values.astype(np.float32)

        # convert attention_mask to correct format
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            attention_mask = (
                attention_mask
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_values"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_values"], attention_mask=attention_mask, padding_value=self.padding_value
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs

    def to_dict(self) -> Dict[str, Any]:
        output = super().to_dict()

        # Don't serialize these as they are derived from the other properties.
        names = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
        for name in names:
            if name in output:
                del output[name]

        return output
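
# A minimal usage sketch (illustrative only; the checkpoint name is an assumption):
#
# import numpy as np
# from transformers import SpeechT5FeatureExtractor
#
# fe = SpeechT5FeatureExtractor.from_pretrained("microsoft/speecht5_tts")
# wav = np.random.randn(16000).astype(np.float32)    # 1 s of fake audio
# enc = fe(audio=wav, sampling_rate=16000)           # waveform -> input_values
# tgt = fe(audio_target=wav, sampling_rate=16000)    # waveform -> log-mel "input_values"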
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)

DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
    ),
    "microsoft/deberta-v2-xxlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
    ),
}


class DebertaV2Config(PretrainedConfig):
    model_type = "deberta-v2"

    def __init__(
        self,
        vocab_size=128100,
        hidden_size=1536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility
        if isinstance(pos_att_type, str):
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act


class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
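
# A minimal usage sketch of the config class above (illustrative only):
#
# config = DebertaV2Config()    # xlarge-sized defaults shown above
# config.num_hidden_layers      # -> 24
# small = DebertaV2Config(hidden_size=384, num_hidden_layers=6, num_attention_heads=6)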
"""simple docstring"""
import math
def solution(n: int = 100) -> int:
    """
    Returns the difference between the square of the sum of the first n natural
    numbers and the sum of their squares.
    """
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(f'''{solution() = }''')
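
# A worked check (a quick sanity calculation, not part of the original file):
# for n = 10, the sum of squares is 385 and (1 + ... + 10)^2 = 55^2 = 3025,
# so solution(10) should return 3025 - 385 = 2640.
# assert solution(10) == 2640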
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """
    Helper function parsing the command line options
    """
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
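
# A minimal usage sketch (script and path names below are placeholders):
#
#   python xla_spawn.py --num_cores 8 run_mlm_wwm.py --output_dir /tmp/out ...
#
# The launched script must expose a `_mp_fn(index)` entry point, e.g.:
#
# def _mp_fn(index):
#     main()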