| code (string, 82–53.2k chars) | code_codestyle (int64, 0–721) | style_context (string, 91–41.9k chars) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
UNIT_SYMBOL = {
    "meter": "m",
    "kilometer": "km",
    "megametre": "Mm",
    "gigametre": "Gm",
    "terametre": "Tm",
    "petametre": "Pm",
    "exametre": "Em",
    "zettametre": "Zm",
    "yottametre": "Ym",
}

# Exponent of the factor (meter)
METRIC_CONVERSION = {
    "m": 0,
    "km": 3,
    "Mm": 6,
    "Gm": 9,
    "Tm": 12,
    "Pm": 15,
    "Em": 18,
    "Zm": 21,
    "Ym": 24,
}


def length_conversion(value: float, from_type: str, to_type: str) -> float:
    """Convert a length between the metric units defined above."""
    # Normalize unit names: lowercase, drop a trailing plural "s", map to symbol.
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")
    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)
    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    # The conversion factor is 10 ** (difference of the two exponents).
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)
    return value * pow(10, exponent)
if __name__ == "__main__":
from doctest import testmod
testmod()
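
# --- Added usage sketch (illustrative, not part of the original module) ---
# Exercises length_conversion with units from the tables above; the expected
# values follow directly from the exponent difference (e.g. km -> m is 10**3).
# Plural unit names also work because of the trailing-"s" strip.
def demo_length_conversion() -> None:
    assert length_conversion(1, "kilometer", "meter") == 1000
    assert length_conversion(4, "megametre", "kilometers") == 4000
    print("length_conversion sanity checks passed")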
import argparse
import collections

import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints

from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging


logging.set_verbosity_info()


def t5x_relpos_bias_lookup(params, i, prefix):
    """Returns the Relative Position Bias parameters of a layer. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]


def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    # Fold the per-head dimensions together so the kernels match PyTorch Linear layouts.
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v


def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]
    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo


def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]


def convert_t5x_to_pytorch(
    variables: dict, *, num_layers: int, is_encoder_only: bool, scalable_attention: bool = False
):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                old, i, "encoder"
            ).T

    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "decoder"
        ).T

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                    old, i, "decoder"
                ).T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new


def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict


def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)


def convert_t5x_checkpoint_to_pytorch(
    t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False, scalable_attention: bool = False
):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = MT5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)

    # Load weights from the T5X checkpoint.
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
    )
    parser.add_argument(
        "--scalable_attention",
        action="store_true",
        help="Whether the model uses scaled attention (umt5 model)",
        default=False,
    )
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
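
# --- Added usage sketch (illustrative; the script filename and paths are assumed) ---
# The converter is driven from the command line, e.g. if the file above is saved
# as convert_t5x_checkpoint_to_pytorch.py:
#
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/output \
#       --scalable_attention
#
# --scalable_attention selects the UMT5 layout, where every block carries its own
# relative-position bias; without it only block 0's bias is converted, as in T5.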
from dataclasses import dataclass
from typing import Tuple

import numpy as np
import torch


@dataclass
class DifferentiableProjectiveCamera:
    """Implements a batch, differentiable, standard pinhole camera."""

    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]

    def __post_init__(self):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2

    def resolution(self):
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))

    def fov(self):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))

    def get_image_coords(self) -> torch.Tensor:
        """Returns integer (x, y) pixel coordinates for every pixel, flattened row-major."""
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode="trunc"),
            ],
            axis=1,
        )
        return coords

    @property
    def camera_rays(self):
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))

        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)

        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)

        return rays

    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]

        flat = coords.view(batch_size, -1, 2)

        res = self.resolution()
        fov = self.fov()

        # Map pixel coordinates to [-1, 1], then scale by the half-angle tangent.
        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)

        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ],
            dim=2,
        )
        return rays.view(batch_size, *shape, 2, 3)

    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
        """Creates a new camera for the resized view, assuming the aspect ratio does not change."""
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin,
            x=self.x,
            y=self.y,
            z=self.z,
            width=width,
            height=height,
            x_fov=self.x_fov,
            y_fov=self.y_fov,
            shape=self.shape,  # shape has no default, so it must be forwarded here
        )


def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
        x=torch.from_numpy(np.stack(xs, axis=0)).float(),
        y=torch.from_numpy(np.stack(ys, axis=0)).float(),
        z=torch.from_numpy(np.stack(zs, axis=0)).float(),
        width=size,
        height=size,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(xs)),
    )
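
# --- Added usage sketch (illustrative, not part of the original module) ---
# Builds the 20-pose pan rig defined above and inspects the ray bundle.
# camera_rays has shape [batch, views * H * W, 2, 3]: index 0 along the
# penultimate axis holds ray origins, index 1 holds unit direction vectors.
def demo_pan_cameras() -> None:
    camera = create_pan_cameras(64)  # twenty 64x64 views circling the origin
    rays = camera.camera_rays
    print(rays.shape)  # torch.Size([1, 20 * 64 * 64, 2, 3])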
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_table_transformer": [
        "TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TableTransformerConfig",
        "TableTransformerOnnxConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_table_transformer"] = [
        "TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TableTransformerForObjectDetection",
        "TableTransformerModel",
        "TableTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_table_transformer import (
        TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TableTransformerConfig,
        TableTransformerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_table_transformer import (
            TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TableTransformerForObjectDetection,
            TableTransformerModel,
            TableTransformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
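
# --- Added usage note (illustrative) ---
# With the _LazyModule wiring above, heavy submodules are only imported on first
# attribute access, e.g.:
#
#   from transformers.models.table_transformer import TableTransformerConfig
#
# does not import modeling_table_transformer (and hence torch) until one of its
# symbols, such as TableTransformerModel, is actually requested.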
import warnings

from ...utils import logging
from .image_processing_yolos import YolosImageProcessor


logger = logging.get_logger(__name__)


class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
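
# --- Added usage note (illustrative) ---
# Instantiating the shim still works but emits the FutureWarning above; new code
# should construct the replacement class directly:
#
#   from transformers import YolosImageProcessor
#   image_processor = YolosImageProcessor()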
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-50-one-to-many-mmt": (
            "https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-50-one-to-many-mmt": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"]
# fmt: on
class MBart50Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        src_lang=None,
        tgt_lang=None,
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) into an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) into a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) into a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """Reset the special tokens to the source lang setting: prefix=[src_lang_code] and suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[src_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the special tokens to the target lang setting: prefix=[tgt_lang_code] and suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]
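
# --- Added usage sketch (illustrative; the checkpoint is from the map above) ---
# Shows the language-code plumbing: the source language code is prepended and
# </s> appended, per set_src_lang_special_tokens.
#
#   from transformers import MBart50Tokenizer
#   tok = MBart50Tokenizer.from_pretrained(
#       "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX", tgt_lang="ro_RO"
#   )
#   batch = tok("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
#   # batch["input_ids"][0] starts with tok.lang_code_to_id["en_XX"] and ends with </s>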
import inspect
import unittest

from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import YolosForObjectDetection, YolosModel
    from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class YolosModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=[30, 30],
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        n_targets=8,
        num_detection_tokens=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.n_targets = n_targets
        self.num_detection_tokens = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        self.expected_seq_len = num_patches + 1 + self.num_detection_tokens

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])

        labels = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            labels = []
            for i in range(self.batch_size):
                target = {}
                target["class_labels"] = torch.randint(
                    high=self.num_labels, size=(self.n_targets,), device=torch_device
                )
                target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device)
                labels.append(target)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return YolosConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            num_detection_tokens=self.num_detection_tokens,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = YolosModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size)
        )

    def create_and_check_for_object_detection(self, config, pixel_values, labels):
        model = YolosForObjectDetection(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values=pixel_values)
        result = model(pixel_values)

        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

        result = model(pixel_values=pixel_values, labels=labels)

        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class YolosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False

    # special case for head models
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "YolosForObjectDetection":
                labels = []
                for i in range(self.model_tester.batch_size):
                    target = {}
                    target["class_labels"] = torch.ones(
                        size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long
                    )
                    target["boxes"] = torch.ones(
                        self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float
                    )
                    labels.append(target)
                inputs_dict["labels"] = labels

        return inputs_dict

    def setUp(self):
        self.model_tester = YolosModelTester(self)
        self.config_tester = ConfigTester(self, config_class=YolosConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_inputs_embeds(self):
        # YOLOS does not use inputs_embeds
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        # in YOLOS, the seq_len is different
        seq_len = self.model_tester.expected_seq_len
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions

            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            # YOLOS has a different seq_length
            seq_length = self.model_tester.expected_seq_len

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_object_detection(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_object_detection(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = YolosModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We verify the results on a COCO fixture image of two cats.
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class YolosModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("hustvl/yolos-small") if is_vision_available() else None

    @slow
    def test_inference_object_detection_head(self):
        model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(inputs.pixel_values)

        # verify outputs
        expected_shape = torch.Size((1, 100, 92))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]],
            device=torch_device,
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]], device=torch_device
        )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4))
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))

        # verify postprocessing
        results = image_processor.post_process_object_detection(
            outputs, threshold=0.3, target_sizes=[image.size[::-1]]
        )[0]
        expected_scores = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(torch_device)
        expected_labels = [75, 75, 17, 63, 17]
        expected_slice_boxes = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(torch_device)

        self.assertEqual(len(results["scores"]), 5)
        self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4))
        self.assertSequenceEqual(results["labels"].tolist(), expected_labels)
        self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes))
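
# --- Added usage note (illustrative; path assumed from the repository layout) ---
# The integration test above can be run in isolation from a transformers checkout:
#
#   pytest tests/models/yolos/test_modeling_yolos.py -k inference_object_detection_head
#
# It requires torch, the vision extras, and network access to download
# hustvl/yolos-small; the @slow marker means RUN_SLOW=1 must be set.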
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def lowerCamelCase__ ( __lowerCamelCase : Tuple ):
__UpperCAmelCase : str = [2, 2, 6, 2] if """tiny""" in model_name else [2, 2, 18, 2]
__UpperCAmelCase : Any = True if """large""" in model_name or """huge""" in model_name else False
__UpperCAmelCase : int = True if """large""" in model_name or """huge""" in model_name else False
__UpperCAmelCase : Optional[int] = True if """large""" in model_name or """huge""" in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
__UpperCAmelCase : Union[str, Any] = [3, 3, 3, 3]
__UpperCAmelCase : Union[str, Any] = [5, 5, 5, 5]
elif "fl4" in model_name:
__UpperCAmelCase : str = [4, 4, 4, 4]
__UpperCAmelCase : Optional[Any] = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
__UpperCAmelCase : Dict = [3, 3, 3, 3]
if "lrf" in model_name:
__UpperCAmelCase : Optional[Any] = [3, 3, 3, 3]
else:
__UpperCAmelCase : Optional[int] = [2, 2, 2, 2]
if "tiny" in model_name:
__UpperCAmelCase : List[str] = 96
elif "small" in model_name:
__UpperCAmelCase : Dict = 96
elif "base" in model_name:
__UpperCAmelCase : List[Any] = 128
elif "large" in model_name:
__UpperCAmelCase : Any = 192
elif "xlarge" in model_name:
__UpperCAmelCase : Tuple = 256
elif "huge" in model_name:
__UpperCAmelCase : int = 352
# set label information
__UpperCAmelCase : Tuple = """huggingface/label-files"""
if "large" in model_name or "huge" in model_name:
__UpperCAmelCase : Any = """imagenet-22k-id2label.json"""
else:
__UpperCAmelCase : Dict = """imagenet-1k-id2label.json"""
__UpperCAmelCase : str = json.load(open(hf_hub_download(__lowerCamelCase , __lowerCamelCase , repo_type="""dataset""" ) , """r""" ) )
__UpperCAmelCase : Optional[int] = {int(__lowerCamelCase ): v for k, v in idalabel.items()}
__UpperCAmelCase : Union[str, Any] = {v: k for k, v in idalabel.items()}
__UpperCAmelCase : List[str] = FocalNetConfig(
embed_dim=__lowerCamelCase , depths=__lowerCamelCase , focal_levels=__lowerCamelCase , focal_windows=__lowerCamelCase , use_conv_embed=__lowerCamelCase , idalabel=__lowerCamelCase , labelaid=__lowerCamelCase , use_post_layernorm=__lowerCamelCase , use_layerscale=__lowerCamelCase , )
return config
def lowerCamelCase__ ( __lowerCamelCase : Tuple ):
if "patch_embed.proj" in name:
__UpperCAmelCase : List[Any] = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
__UpperCAmelCase : Dict = name.replace("""patch_embed.norm""" , """embeddings.norm""" )
if "layers" in name:
__UpperCAmelCase : int = """encoder.""" + name
if "encoder.layers" in name:
__UpperCAmelCase : Optional[int] = name.replace("""encoder.layers""" , """encoder.stages""" )
if "downsample.proj" in name:
__UpperCAmelCase : Optional[Any] = name.replace("""downsample.proj""" , """downsample.projection""" )
if "blocks" in name:
__UpperCAmelCase : Union[str, Any] = name.replace("""blocks""" , """layers""" )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
__UpperCAmelCase : List[str] = name.replace("""modulation.f""" , """modulation.projection_in""" )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
__UpperCAmelCase : List[Any] = name.replace("""modulation.h""" , """modulation.projection_context""" )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
__UpperCAmelCase : str = name.replace("""modulation.proj""" , """modulation.projection_out""" )
if name == "norm.weight":
__UpperCAmelCase : Optional[Any] = """layernorm.weight"""
if name == "norm.bias":
__UpperCAmelCase : Dict = """layernorm.bias"""
if "head" in name:
__UpperCAmelCase : Tuple = name.replace("""head""" , """classifier""" )
else:
__UpperCAmelCase : int = """focalnet.""" + name
return name
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    # fmt: off
    model_name_to_url = {
        "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
        "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
        "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
        "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
        "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
        "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
        "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
        "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
        "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
        "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
    }
    # fmt: on
    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()
    # load state dict
    model.load_state_dict(state_dict)
    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.229, 0.224, 0.225],
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")
    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)
    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)
    outputs = model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])
    print("First values of logits:", outputs.logits[0, :3])
    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="focalnet-tiny",
type=str,
help="Name of the FocalNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub.",
)
args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
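# Example invocation (the script filename and output path below are illustrative
# assumptions, not taken from the original source):
#     python convert_focalnet_to_hf_format.py --model_name focalnet-tiny \
#         --pytorch_dump_folder_path ./focalnet-tiny --push_to_hub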
| 63
|
from __future__ import annotations
def average(nums: list) -> float:
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
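# Hand-checked examples for the function above (the original doctests were
# stripped from this copy):
#     average([1, 2, 3])  # -> 2.0
#     average([5])        # -> 5.0
#     average([])         # raises ValueError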
if __name__ == "__main__":
import doctest
doctest.testmod()
| 39
| 0
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mra"] = [
"""MRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MraForMaskedLM""",
"""MraForMultipleChoice""",
"""MraForQuestionAnswering""",
"""MraForSequenceClassification""",
"""MraForTokenClassification""",
"""MraLayer""",
"""MraModel""",
"""MraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 412
|
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1."
            )
        self.coefficients = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_a):
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1):
                coefficients[i] += polynomial_a.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_a.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_a.degree, coefficients)

    def __sub__(self, polynomial_a):
        return self + polynomial_a * Polynomial(0, [-1])

    def __neg__(self):
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_a):
        coefficients = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_a.degree + 1):
                coefficients[i + j] += self.coefficients[i] * polynomial_a.coefficients[j]
        return Polynomial(self.degree + polynomial_a.degree, coefficients)

    def evaluate(self, substitution):
        result = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self):
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "
            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)
        return polynomial

    def __repr__(self):
        return self.__str__()

    def derivative(self):
        coefficients = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant=0):
        coefficients = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_a):
        if not isinstance(polynomial_a, Polynomial):
            return False
        if self.degree != polynomial_a.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_a):
        return not self.__eq__(polynomial_a)
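# Minimal usage sketch for the Polynomial class above; values are illustrative
# and hand-checked:
#     p = Polynomial(2, [1, 3, 2])   # coefficients low to high: 1 + 3x + 2x^2
#     p.evaluate(2)                  # -> 15  (1 + 6 + 8)
#     str(p.derivative())            # -> "4x + 3"
#     str(p + p)                     # -> "4x^2 + 6x + 2"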
| 412
| 1
|
'''simple docstring'''
def odd_even_transposition(arr: list) -> list:
    arr_size = len(arr)
    for _ in range(arr_size):
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


if __name__ == "__main__":
    arr = list(range(10, 0, -1))
    print(f"Original: {arr}. Sorted: {odd_even_transposition(arr)}")
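# Note: each pass compares only disjoint index pairs (even/odd or odd/even
# offsets), which is what makes this sort suitable for parallel execution;
# n passes always suffice for n elements. Hand-checked:
#     odd_even_transposition([3, 1, 2])  # -> [1, 2, 3]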
| 433
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
logger = logging.get_logger(__name__)
class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 433
| 1
|
'''simple docstring'''
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
logger = logging.get_logger(__name__)
DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)
def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
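# Minimal usage sketch (dataset contents below are illustrative assumptions):
#     from datasets import Dataset
#     d1 = Dataset.from_dict({"a": [0, 1, 2]})
#     d2 = Dataset.from_dict({"a": [10, 11, 12]})
#     interleave_datasets([d1, d2], probabilities=[0.5, 0.5], seed=42)  # samples rows from both
#     concatenate_datasets([d1, d2])                                    # 6 rows, d1's rows first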
| 700
|
'''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()
DEVICE_MAPPING: Optional[dict] = None
class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs
    @staticmethod
    def _map_devices_to_str():
        import jax

        return {str(device): device for device in jax.devices()}

    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return jnp.stack(column, axis=0)
        return column
    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})
    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)
    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
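# Sketch of how this formatter is normally reached through the public
# `datasets` API (requires jax to be installed; values are illustrative):
#     from datasets import Dataset
#     ds = Dataset.from_dict({"x": [1.0, 2.0]}).with_format("jax")
#     ds[0]["x"]  # returned as a jax.Array on the default (or requested) device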
| 126
| 0
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))
        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]
        return image_inputs
@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_batch_feature(self):
        pass
    def test_call_pil_four_channels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 415
|
def solution(min_total: int = 10**12) -> int:
    prev_numerator = 1
    prev_denominator = 0
    numerator = 1
    denominator = 1
    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator
        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator
    return (denominator + 1) // 2
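# The loop above walks successive solutions of the negative Pell equation
# x^2 - 2*y^2 = -1, with x = 2*total - 1 and y = 2*blue - 1. Hand-checked
# small cases: solution(2) == 3 (3 blue of 4 discs: 3/4 * 2/3 == 1/2) and
# solution(22) == 85 (85 blue of 120 discs).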
if __name__ == "__main__":
print(f"""{solution() = }""")
| 415
| 1
|
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
class UperNetConfig(PretrainedConfig):
    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
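# Minimal construction sketch (the backbone default is the ResNet config set
# up in __init__ above; the model class name follows the transformers API):
#     from transformers import UperNetForSemanticSegmentation
#     config = UperNetConfig()
#     model = UperNetForSemanticSegmentation(config)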
| 257
|
'''simple docstring'''
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
    Wav2Vec2FeatureExtractor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model
def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model
def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]
    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )
    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")
    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]
    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
args = parser.parse_args()
convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 257
| 1
|
def solution(limit: int = 1000000) -> int:
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}
    for input1 in range(2, limit):
        counter = 0
        number = input1
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if input1 not in counters:
            counters[input1] = counter
        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter
    return largest_number
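# Hand-checked small case: solution(10) == 9, since the Collatz chain starting
# at 9 (9 -> 28 -> 14 -> ... -> 1) has 20 terms, the longest for any start below 10.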
if __name__ == "__main__":
print(solution(int(input().strip())))
| 89
|
from __future__ import annotations
def average(nums: list) -> float:
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 89
| 1
|
import csv
import tweepy
# Twitter API credentials
snake_case = """"""
snake_case = """"""
snake_case = """"""
snake_case = """"""
def get_all_tweets(screen_name: str) -> None:
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)
    # initialize a list to hold all the tweepy Tweets
    alltweets = []
    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)
    # save most recent tweets
    alltweets.extend(new_tweets)
    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1
    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)
        # save most recent tweets
        alltweets.extend(new_tweets)
        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(f"...{len(alltweets)} tweets downloaded so far")
    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
| 488
|
import random
import unittest
import torch
from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 488
| 1
|
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3
    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3
    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
| 383
|
def solution(numerator: int = 1, digit: int = 1000) -> int:
    the_digit = 1
    longest_list_length = 0
    for divide_by_number in range(numerator, digit + 1):
        has_been_divided = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
            now_divide = now_divide * 10 % divide_by_number
    return the_digit
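# The inner loop tracks remainders of the long division 1/d until one repeats,
# so len(has_been_divided) bounds the recurring-cycle length. Hand-checked:
# solution(1, 10) == 7, since 1/7 = 0.(142857) has the longest cycle below 10.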
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 383
| 1
|
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_checkpoint_to_pytorch(tf_checkpoint_path: str, config_path: str, pytorch_dump_path: str):
    def get_masked_lm_array(name: str):
        full_name = f"masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_array(name: str):
        full_name = f"encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_layer_array(layer_index: int, name: str):
        full_name = f"encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_attention_layer_array(layer_index: int, name: str, original_shape):
        full_name = f"encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        array = array.reshape(original_shape)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    print(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertForMaskedLM(config)
    # Layers
    for layer_index in range(0, config.num_hidden_layers):
        layer: BertLayer = model.bert.encoder.layer[layer_index]

        # Self-attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.query.weight.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/kernel", self_attn.query.weight.data.shape
        )
        self_attn.query.bias.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/bias", self_attn.query.bias.data.shape
        )
        self_attn.key.weight.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/kernel", self_attn.key.weight.data.shape
        )
        self_attn.key.bias.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/bias", self_attn.key.bias.data.shape
        )
        self_attn.value.weight.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/kernel", self_attn.value.weight.data.shape
        )
        self_attn.value.bias.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/bias", self_attn.value.bias.data.shape
        )

        # Self-attention Output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.weight.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/kernel", self_output.dense.weight.data.shape
        )
        self_output.dense.bias.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/bias", self_output.dense.bias.data.shape
        )

        self_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/gamma")
        self_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/beta")

        # Intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.weight.data = get_encoder_layer_array(layer_index, "_intermediate_dense/kernel")
        intermediate.dense.bias.data = get_encoder_layer_array(layer_index, "_intermediate_dense/bias")

        # Output
        bert_output: BertOutput = layer.output

        bert_output.dense.weight.data = get_encoder_layer_array(layer_index, "_output_dense/kernel")
        bert_output.dense.bias.data = get_encoder_layer_array(layer_index, "_output_dense/bias")

        bert_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_output_layer_norm/gamma")
        bert_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_output_layer_norm/beta")

    # Embeddings
    model.bert.embeddings.position_embeddings.weight.data = get_encoder_array("_position_embedding_layer/embeddings")
    model.bert.embeddings.token_type_embeddings.weight.data = get_encoder_array("_type_embedding_layer/embeddings")
    model.bert.embeddings.LayerNorm.weight.data = get_encoder_array("_embedding_norm_layer/gamma")
    model.bert.embeddings.LayerNorm.bias.data = get_encoder_array("_embedding_norm_layer/beta")

    # LM Head
    lm_head = model.cls.predictions.transform

    lm_head.dense.weight.data = get_masked_lm_array("dense/kernel")
    lm_head.dense.bias.data = get_masked_lm_array("dense/bias")

    lm_head.LayerNorm.weight.data = get_masked_lm_array("layer_norm/gamma")
    lm_head.LayerNorm.bias.data = get_masked_lm_array("layer_norm/beta")

    model.bert.embeddings.word_embeddings.weight.data = get_masked_lm_array("embedding_table")

    # Pooling
    model.bert.pooler = BertPooler(config=config)
    model.bert.pooler.dense.weight.data = get_encoder_array("_pooler_layer/kernel")
    model.bert.pooler.dense.bias.data = get_encoder_array("_pooler_layer/bias")

    # Export final model
    model.save_pretrained(pytorch_dump_path)

    # Integration test - should load without any errors ;)
    new_model = BertForMaskedLM.from_pretrained(pytorch_dump_path)
    print(new_model.eval())

    print("Model conversion was done successfully!")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"""--tf_checkpoint_path""", type=str, required=True, help="""Path to the TensorFlow Token Dropping checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
type=str,
required=True,
help="""The config json file corresponding to the BERT model. This specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""",
type=str,
required=True,
help="""Path to the output PyTorch model.""",
)
args = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 719
|
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    # Compute the entropy of a probability distribution along the last dimension
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
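# Quick sanity check (illustrative): a uniform distribution over two outcomes
# has entropy ln(2):
#     entropy(torch.tensor([0.5, 0.5]))  # -> tensor(0.6931...)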
def print_2d_tensor(tensor):
    # Log a 2D tensor, one encoder layer per row
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """This method shows how to compute:
    - head attention entropy
    - head importance scores according to http://arxiv.org/abs/1905.10650
    """
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)

    return attn_entropy, head_importance, total_loss

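# Hedged usage sketch (variable names are illustrative): given parsed `args`, a
# GPT-2 LM head model and an `eval_dataloader` yielding token-id batches:
#
#   attn_entropy, head_importance, loss = compute_heads_importance(args, model, eval_dataloader)
#
# The importance score follows Michel et al., "Are Sixteen Heads Really Better
# Than One?" (http://arxiv.org/abs/1905.10650): the absolute gradient of the loss
# with respect to the head mask, accumulated over the data and then normalized.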
def mask_heads(args, model, eval_dataloader):
    """This method shows how to mask heads (set some heads to zero) to test the effect on the network,
    based on the head importance scores, as described in Michel et al. (http://arxiv.org/abs/1905.10650).
    """
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downstream score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask

def prune_heads(args, model, eval_dataloader, head_mask):
    """This method shows how to prune heads (remove heads weights) based on
    the head importance scores, as described in Michel et al. (http://arxiv.org/abs/1905.10650).
    """
    # Try pruning and test time speedup
    # Pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)

def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )

    # Other parameters
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir",
        default=None,
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance",
        action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask heads until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold",
        default=0.9,
        type=float,
        help="masking threshold in terms of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Fraction of heads to mask at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)


if __name__ == "__main__":
    main()
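# A hypothetical command line for this script (script name and argument values
# are placeholders):
#
#   python run_prune_gpt.py \
#       --data_dir tokens.txt \
#       --model_name_or_path gpt2 \
#       --output_dir ./pruned_gpt2 \
#       --try_masking --masking_threshold 0.9 --masking_amount 0.1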
'''simple docstring'''
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["""memory_attention""", """encoder_attn"""],
["""attention""", """attn"""],
["""/""", """."""],
[""".LayerNorm.gamma""", """_layer_norm.weight"""],
[""".LayerNorm.beta""", """_layer_norm.bias"""],
["""r.layer_""", """r.layers."""],
["""output_proj""", """out_proj"""],
["""ffn.dense_1.""", """fc2."""],
["""ffn.dense.""", """fc1."""],
["""ffn_layer_norm""", """final_layer_norm"""],
["""kernel""", """weight"""],
["""encoder_layer_norm.""", """encoder.layer_norm."""],
["""decoder_layer_norm.""", """decoder.layer_norm."""],
["""embeddings.weights""", """shared.weight"""],
]
def rename_state_dict_key(k: str) -> str:
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k

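# Worked example, traced through the PATTERNS table above (the key itself is
# hypothetical):
#
#   rename_state_dict_key("encoder/memory_attention/output_proj/kernel")
#   # -> "encoder.encoder_attn.out_proj.weight"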
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")

        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model

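# Note on the transposition in convert_pegasus: TF stores dense kernels as
# (in_features, out_features), while torch.nn.Linear.weight is
# (out_features, in_features). Hence "dense"/"proj" tensors are transposed
# before the shape check, e.g. a (1024, 4096) TF kernel becomes a
# (4096, 1024) PyTorch weight.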
def get_tf_weights_as_numpy(path: str = "./ckpt/aeslc/model.ckpt-32000") -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights

def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str) -> None:
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
    """Return num! (factorial), raising ValueError on negative input."""
    if num < 0:
        raise ValueError("Number should not be negative.")
    return 1 if num in (0, 1) else num * factorial(num - 1)

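# Illustrative results (memoized by the lru_cache decorator above):
#
#   factorial(0)   # 1
#   factorial(5)   # 120
#   factorial(-1)  # raises ValueError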
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPT2Config, T5Config, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
        TFGPT2LMHeadModel,
TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig):
    model_type = "new-model"


if is_tf_available():

    class TFNewModel(TFBertModel):
        config_class = NewModelConfig


@require_tf
class TFAutoModelTest(unittest.TestCase):
@slow
def snake_case__ ( self ) -> Tuple:
"""simple docstring"""
__lowercase = """bert-base-cased"""
__lowercase = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
__lowercase = TFAutoModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
@slow
def snake_case__ ( self ) -> List[Any]:
"""simple docstring"""
__lowercase = """bert-base-cased"""
__lowercase = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
__lowercase = TFAutoModelForPreTraining.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
@slow
def snake_case__ ( self ) -> Optional[int]:
"""simple docstring"""
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
__lowercase = TFAutoModelForCausalLM.from_pretrained(lowerCamelCase__ )
__lowercase ,__lowercase = TFAutoModelForCausalLM.from_pretrained(lowerCamelCase__ , output_loading_info=lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
@slow
def snake_case__ ( self ) -> Optional[int]:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
__lowercase = TFAutoModelWithLMHead.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
@slow
def snake_case__ ( self ) -> Optional[int]:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
__lowercase = TFAutoModelForMaskedLM.from_pretrained(lowerCamelCase__ )
__lowercase ,__lowercase = TFAutoModelForMaskedLM.from_pretrained(lowerCamelCase__ , output_loading_info=lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, T5Config)

            model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFT5ForConditionalGeneration)
@slow
def snake_case__ ( self ) -> int:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
__lowercase = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
__lowercase = TFAutoModelForSequenceClassification.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
@slow
def snake_case__ ( self ) -> Dict:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
__lowercase = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
__lowercase = TFAutoModelForQuestionAnswering.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
@slow
@require_tensorflow_probability
def snake_case__ ( self ) -> Tuple:
"""simple docstring"""
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
__lowercase = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
__lowercase = TFAutoModelForTableQuestionAnswering.from_pretrained(lowerCamelCase__ )
__lowercase ,__lowercase = TFAutoModelForTableQuestionAnswering.from_pretrained(
lowerCamelCase__ , output_loading_info=lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def snake_case__ ( self ) -> str:
"""simple docstring"""
__lowercase = TFAutoModelWithLMHead.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(model.num_parameters() , 1_4410 )
self.assertEqual(model.num_parameters(only_trainable=lowerCamelCase__ ) , 1_4410 )
def snake_case__ ( self ) -> Optional[Any]:
"""simple docstring"""
__lowercase = TFAutoModelWithLMHead.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(model.num_parameters() , 1_4410 )
self.assertEqual(model.num_parameters(only_trainable=lowerCamelCase__ ) , 1_4410 )
def snake_case__ ( self ) -> List[Any]:
"""simple docstring"""
__lowercase = TFAutoModel.from_pretrained("""sgugger/funnel-random-tiny""" )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
__lowercase = copy.deepcopy(model.config )
__lowercase = ["""FunnelBaseModel"""]
__lowercase = TFAutoModel.from_config(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(lowerCamelCase__ )
__lowercase = TFAutoModel.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def snake_case__ ( self ) -> Tuple:
"""simple docstring"""
try:
AutoConfig.register("""new-model""" , lowerCamelCase__ )
__lowercase = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
with self.assertRaises(lowerCamelCase__ ):
auto_class.register(lowerCamelCase__ , lowerCamelCase__ )
auto_class.register(lowerCamelCase__ , lowerCamelCase__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCamelCase__ ):
auto_class.register(lowerCamelCase__ , lowerCamelCase__ )
# Now that the config is registered, it can be used as any other config with the auto-API
__lowercase = BertModelTester(self ).get_config()
__lowercase = NewModelConfig(**tiny_config.to_dict() )
__lowercase = auto_class.from_config(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(lowerCamelCase__ )
__lowercase = auto_class.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def snake_case__ ( self ) -> Tuple:
"""simple docstring"""
with self.assertRaisesRegex(
lowerCamelCase__ , """bert-base is not a local folder and is not a valid model identifier""" ):
__lowercase = TFAutoModel.from_pretrained("""bert-base""" )
def snake_case__ ( self ) -> Optional[Any]:
"""simple docstring"""
with self.assertRaisesRegex(
lowerCamelCase__ , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
__lowercase = TFAutoModel.from_pretrained(lowerCamelCase__ , revision="""aaaaaa""" )
def snake_case__ ( self ) -> str:
"""simple docstring"""
with self.assertRaisesRegex(
lowerCamelCase__ , """hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin""" , ):
__lowercase = TFAutoModel.from_pretrained("""hf-internal-testing/config-no-model""" )
def snake_case__ ( self ) -> Optional[Any]:
"""simple docstring"""
with self.assertRaisesRegex(lowerCamelCase__ , """Use `from_pt=True` to load this model""" ):
__lowercase = TFAutoModel.from_pretrained("""hf-internal-testing/tiny-bert-pt-only""" )
def snake_case__ ( self ) -> Optional[int]:
"""simple docstring"""
__lowercase = TFAutoModel.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
with RequestCounter() as counter:
__lowercase = TFAutoModel.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
# With a sharded checkpoint
__lowercase = TFAutoModel.from_pretrained("""ArthurZ/tiny-random-bert-sharded""" )
with RequestCounter() as counter:
__lowercase = TFAutoModel.from_pretrained("""ArthurZ/tiny-random-bert-sharded""" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
'''simple docstring'''
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str) -> dict:
    """Fetch a single story from the Hacker News API."""
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    """Get the top `max_stories` posts from Hacker News - https://news.ycombinator.com/"""
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)

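# Sample of the markdown this produces (titles and URLs are illustrative):
#
#   * [Some front-page story](https://example.com/story)
#   * [Another story](https://example.com/other)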
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
def harmonic_series(n_term: str) -> list:
    """Return the harmonic series 1, 1/2, ..., 1/n as strings."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series

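# Illustrative behavior:
#
#   harmonic_series("5")  # ['1', '1/2', '1/3', '1/4', '1/5']
#   harmonic_series("")   # []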
if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
print('Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n')
print(harmonic_series(nth_term))
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-ctx_encoder-single-nq-base': 5_12,
'facebook/dpr-ctx_encoder-multiset-base': 5_12,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-question_encoder-single-nq-base': 5_12,
'facebook/dpr-question_encoder-multiset-base': 5_12,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-reader-single-nq-base': 5_12,
'facebook/dpr-reader-multiset-base': 5_12,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class DPRQuestionEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
a_ :Tuple = r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. 
Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
@add_start_docstrings(_UpperCAmelCase )
class lowercase :
def __call__( self : List[Any] , _lowercase : Any , _lowercase : Optional[str] = None , _lowercase : Optional[str] = None , _lowercase : Union[bool, str] = False , _lowercase : Union[bool, str] = False , _lowercase : Optional[int] = None , _lowercase : Optional[Union[str, TensorType]] = None , _lowercase : Optional[bool] = None , **_lowercase : str , ):
if titles is None and texts is None:
return super().__call__(
_lowercase , padding=_lowercase , truncation=_lowercase , max_length=_lowercase , return_tensors=_lowercase , return_attention_mask=_lowercase , **_lowercase , )
elif titles is None or texts is None:
SCREAMING_SNAKE_CASE__ : List[str] = titles if texts is None else texts
return super().__call__(
_lowercase , _lowercase , padding=_lowercase , truncation=_lowercase , max_length=_lowercase , return_tensors=_lowercase , return_attention_mask=_lowercase , **_lowercase , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = titles if not isinstance(_lowercase , _lowercase ) else [titles]
SCREAMING_SNAKE_CASE__ : Optional[int] = texts if not isinstance(_lowercase , _lowercase ) else [texts]
SCREAMING_SNAKE_CASE__ : List[Any] = len(_lowercase )
SCREAMING_SNAKE_CASE__ : str = questions if not isinstance(_lowercase , _lowercase ) else [questions] * n_passages
if len(_lowercase ) != len(_lowercase ):
raise ValueError(
f"""There should be as many titles than texts but got {len(_lowercase )} titles and {len(_lowercase )} texts.""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = super().__call__(_lowercase , _lowercase , padding=_lowercase , truncation=_lowercase )['''input_ids''']
SCREAMING_SNAKE_CASE__ : Tuple = super().__call__(_lowercase , add_special_tokens=_lowercase , padding=_lowercase , truncation=_lowercase )['''input_ids''']
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
'''input_ids''': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_lowercase , _lowercase )
]
}
if return_attention_mask is not False:
SCREAMING_SNAKE_CASE__ : Optional[int] = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
SCREAMING_SNAKE_CASE__ : Dict = attention_mask
return self.pad(_lowercase , padding=_lowercase , max_length=_lowercase , return_tensors=_lowercase )
def lowercase__ ( self : List[Any] , _lowercase : BatchEncoding , _lowercase : DPRReaderOutput , _lowercase : int = 16 , _lowercase : int = 64 , _lowercase : int = 4 , ):
SCREAMING_SNAKE_CASE__ : Optional[int] = reader_input['''input_ids''']
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = reader_output[:3]
SCREAMING_SNAKE_CASE__ : Any = len(_lowercase )
SCREAMING_SNAKE_CASE__ : int = sorted(range(_lowercase ) , reverse=_lowercase , key=relevance_logits.__getitem__ )
SCREAMING_SNAKE_CASE__ : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
SCREAMING_SNAKE_CASE__ : Optional[int] = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
SCREAMING_SNAKE_CASE__ : Any = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
SCREAMING_SNAKE_CASE__ : Dict = sequence_ids.index(self.pad_token_id )
else:
SCREAMING_SNAKE_CASE__ : List[str] = len(_lowercase )
SCREAMING_SNAKE_CASE__ : Any = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=_lowercase , top_spans=_lowercase , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=_lowercase , start_index=_lowercase , end_index=_lowercase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(_lowercase ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def lowercase__ ( self : Dict , _lowercase : List[int] , _lowercase : List[int] , _lowercase : int , _lowercase : int , ):
SCREAMING_SNAKE_CASE__ : Optional[int] = []
for start_index, start_score in enumerate(_lowercase ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
SCREAMING_SNAKE_CASE__ : Optional[int] = sorted(_lowercase , key=lambda _lowercase : x[1] , reverse=_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(f"""Wrong span indices: [{start_index}:{end_index}]""" )
SCREAMING_SNAKE_CASE__ : Tuple = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(f"""Span is too long: {length} > {max_answer_length}""" )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_lowercase ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(_UpperCAmelCase )
class lowercase ( _UpperCAmelCase , _UpperCAmelCase ):
lowerCamelCase : Dict = VOCAB_FILES_NAMES
lowerCamelCase : Union[str, Any] = READER_PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : List[str] = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : str = READER_PRETRAINED_INIT_CONFIGURATION
lowerCamelCase : List[Any] = ['''input_ids''', '''attention_mask''']
def logical_left_shift(number: int, shift_amount: int) -> str:
    """Shift `number` left by `shift_amount` bits and return the binary string."""
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")
    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number

def logical_right_shift(number: int, shift_amount: int) -> str:
    """Shift `number` right by `shift_amount` bits (zero-fill) and return the binary string."""
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")
    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number

def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    """Shift `number` right by `shift_amount` bits, preserving the sign bit."""
    if number >= 0:  # Get binary representation of positive number
        binary_number = "0" + str(bin(number)).strip("-")[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])  # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            "1" + "0" * (binary_number_length - len(binary_number)) + binary_number
        )

    if shift_amount >= len(binary_number):
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )
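# Illustrative values, computed from the functions above:
#
#   logical_left_shift(17, 2)       # '0b1000100'
#   logical_right_shift(1983, 4)    # '0b1111011'
#   arithmetic_right_shift(-17, 2)  # '0b111011'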
if __name__ == "__main__":
import doctest
doctest.testmod()
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)

            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        train_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        val_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir,
            per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size,
            predict_with_generate=True,
            evaluation_strategy="steps",
            do_train=True,
            do_eval=True,
            warmup_steps=0,
            eval_steps=2,
            logging_steps=2,
        )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert,
            args=training_args,
            compute_metrics=_compute_metrics,
            train_dataset=train_dataset,
            eval_dataset=val_dataset,
            tokenizer=tokenizer,
        )

        # start training
        trainer.train()
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # reduce the amount of TensorFlow C++ log noise
print("Python version:", sys.version)
print("transformers version:", transformers.__version__)
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
print("Torch version:", None)
try:
import deepspeed
print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
print("DeepSpeed version:", None)
try:
import tensorflow as tf
print("TensorFlow version:", tf.__version__)
print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
print("TensorFlow version:", None)
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING


def _get_default_logging_level():
    """
    If the DATASETS_VERBOSITY env var is set to one of the valid choices, return that as the new
    default level. Otherwise, fall back to `_default_log_level`.
    """
    env_level_str = os.getenv("DATASETS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option DATASETS_VERBOSITY={env_level_str}, "
                f"has to be one of: {', '.join(log_levels.keys())}"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level())


def _reset_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET)


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the specified name."""
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name)


def get_verbosity() -> int:
    """Return the current level for the root logger."""
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    """Set the level for the root logger."""
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    """Set the verbosity to the `INFO` level."""
    return set_verbosity(INFO)


def set_verbosity_warning():
    """Set the verbosity to the `WARNING` level."""
    return set_verbosity(WARNING)


def set_verbosity_debug():
    """Set the verbosity to the `DEBUG` level."""
    return set_verbosity(DEBUG)


def set_verbosity_error():
    """Set the verbosity to the `ERROR` level."""
    return set_verbosity(ERROR)


def disable_propagation() -> None:
    """Disable propagation of the library log outputs."""
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    """Enable propagation of the library log outputs."""
    _get_library_root_logger().propagate = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return empty function."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


_tqdm_active = True


class _tqdm_cls:
    def __call__(self, *args, disable=False, **kwargs):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    """Return a boolean indicating whether tqdm progress bars are enabled."""
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    """Enable tqdm progress bar."""
    global _tqdm_active
    _tqdm_active = True


def disable_progress_bar():
    """Disable tqdm progress bar."""
    global _tqdm_active
    _tqdm_active = False
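# Hedged usage sketch (illustrative): library code typically grabs a child
# logger, while callers tune verbosity and progress bars via the helpers above:
#
#   logger = get_logger(__name__)
#   set_verbosity(WARNING)
#   disable_progress_bar()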
class Graph:
    """Undirected weighted graph stored as an adjacency map."""

    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        """Add a vertex to the graph if it is not already present."""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        """Add an undirected edge (head, tail) with the given weight."""
        self.add_vertex(head)
        self.add_vertex(tail)

        if head == tail:
            return

        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        """Make all edge weights distinct, as required by Boruvka's algorithm."""
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))

        for i in range(len(edges)):
            edges[i] = list(edges[i])

        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1

        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self):
        """Return all edges as (tail, head, weight) tuples."""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        """Build a graph from lists of vertices and (head, tail, weight) edges."""
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g
    class UnionFind:
        """Disjoint-set (union-find) with path compression and union by rank."""

        def __init__(self):
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            if item in self.parent:
                return self.find(item)
            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            if item not in self.parent:
                return self.make_set(item)
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            root1 = self.find(item1)
            root2 = self.find(item2)

            if root1 == root2:
                return root1

            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1

            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2

            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None

    @staticmethod
    def boruvka_mst(graph):
        """Compute the minimum spanning tree of `graph` using Boruvka's algorithm."""
        num_components = graph.num_vertices

        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1

            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]

                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
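# A minimal end-to-end sketch for the class above; the graph shape and weights
# are illustrative only.
if __name__ == "__main__":
    g = Graph.build(
        vertices=[1, 2, 3, 4],
        edges=[(1, 2, 1), (2, 3, 2), (3, 4, 1), (4, 1, 3)],
    )
    g.distinct_weight()  # Boruvka's algorithm assumes distinct edge weights
    mst = Graph.boruvka_mst(g)
    print(mst)  # prints each MST edge as "head -> tail == weight"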
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
logger = logging.get_logger(__name__)


class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
'''simple docstring'''
import json
import sys
def format_json_to_md(input_json_file, output_md_file):
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]

    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]

        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")

        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)

            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"

        output_md += [title, lines, value, " "]

    output_md.append("</details>")

    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))


if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]

    format_json_to_md(input_json_file, output_md_file)
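# For reference, a minimal input exercising every branch above; the benchmark
# and metric names here are made up for illustration:
#
#   {
#       "benchmarks/inference.json": {
#           "load_time_s": {"new": 1.25, "old": 1.40, "diff": -0.15},
#           "peak_memory_gb": {"new": 3.2}
#       }
#   }
#
# "old" and "diff" are optional; when missing, only the new value is rendered.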
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
logger = logging.get_logger(__name__)


class DeiTFeatureExtractor(DeiTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool


class TextSummarizationTool(PipelineTool):
    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
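# A minimal usage sketch. Hedged: this assumes the surrounding tools runtime,
# where `PipelineTool` instances are callable (encode -> forward -> decode),
# and requires network access to fetch the checkpoint named above.
#
#   summarizer = TextSummarizationTool()
#   print(summarizer("A long English text to condense goes here ..."))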
# 2nd-order IIR filters (biquads) following the Audio EQ Cookbook:
# https://webaudio.github.io/Audio-EQ-Cookbook/audio-eq-cookbook.html
from math import cos, sin, sqrt, tau

from audio_filters.iir_filter import IIRFilter


def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a low-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a band-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates an all-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a peak filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a low-shelf filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-shelf filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
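# A quick usage sketch. Hedged: this assumes `IIRFilter` exposes a per-sample
# `process` method, as in this repo's audio_filters.iir_filter module.
if __name__ == "__main__":
    from math import pi

    filt = make_lowpass(frequency=1_000, samplerate=48_000)
    tone = [sin(2 * pi * 440 * n / 48_000) for n in range(480)]  # 10 ms of a 440 Hz tone
    filtered = [filt.process(s) for s in tone]  # 440 Hz is well below cutoff, so it passes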
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """
    Creates a set of `DataLoader`s for the glue dataset, using "bert-base-cased" as the tokenizer.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
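# To see what `find_executable_batch_size` contributes in isolation, here is a
# tiny self-contained sketch (the out-of-memory threshold below is faked purely
# for illustration; the decorator halves the batch size and retries whenever
# the wrapped function raises a CUDA out-of-memory error):
#
#   from accelerate.utils import find_executable_batch_size
#
#   @find_executable_batch_size(starting_batch_size=128)
#   def train(batch_size):
#       if batch_size > 32:  # pretend anything above 32 exhausts GPU memory
#           raise RuntimeError("CUDA out of memory.")
#       print(f"training succeeded with batch_size={batch_size}")
#
#   train()  # retries 128 -> 64 -> 32 and prints at 32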
'''simple docstring'''
from __future__ import annotations
import queue
class TreeNode:
    def __init__(self, data: int) -> None:
        self.data = data
        self.left: TreeNode | None = None
        self.right: TreeNode | None = None
def build_tree() -> TreeNode:
    print("\n********Press N to stop entering at any point of time********\n")
    check = input("Enter the value of the root node: ").strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise RuntimeError("unreachable: input loop always returns once entry stops")
def pre_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=",")
    pre_order(node.left)
    pre_order(node.right)


def in_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=",")
    in_order(node.right)


def post_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=",")


def level_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=",")
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)
def level_order_actual(node: TreeNode) -> None:
    """Level-order traversal that prints one tree level per line."""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=",")
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)
def pre_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=",")
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right


def in_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=",")
        n = n.right
def post_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data, end=",")
def prompt(s: str = "", width: int = 50, char: str = "*") -> str:
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s) - 2, 2)
    return f"{left * char} {s} {(left + extra) * char}"
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(prompt("Binary Tree Traversals"))

    node: TreeNode = build_tree()
    print(prompt("Pre Order Traversal"))
    pre_order(node)
    print(prompt() + "\n")

    print(prompt("In Order Traversal"))
    in_order(node)
    print(prompt() + "\n")

    print(prompt("Post Order Traversal"))
    post_order(node)
    print(prompt() + "\n")

    print(prompt("Level Order Traversal"))
    level_order(node)
    print(prompt() + "\n")

    print(prompt("Actual Level Order Traversal"))
    level_order_actual(node)
    print("*" * 50 + "\n")

    print(prompt("Pre Order Traversal - Iteration Version"))
    pre_order_iter(node)
    print(prompt() + "\n")

    print(prompt("In Order Traversal - Iteration Version"))
    in_order_iter(node)
    print(prompt() + "\n")

    print(prompt("Post Order Traversal - Iteration Version"))
    post_order_iter(node)
    print(prompt())
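# A non-interactive smoke test of the traversals above (a sketch; the inline
# comments show the expected output):
#
#   root = TreeNode(1)
#   root.left, root.right = TreeNode(2), TreeNode(3)
#   pre_order(root)   # 1,2,3,
#   in_order(root)    # 2,1,3,
#   post_order(root)  # 2,3,1,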
'''simple docstring'''
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("repo_id", ["canonical_dataset_name", "org-name/dataset-name"])
@pytest.mark.parametrize("path", ["filename.csv", "filename with blanks.csv"])
@pytest.mark.parametrize("revision", [None, "v2"])
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
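# For concreteness, one parametrization resolves as follows (spaces are
# percent-encoded by `quote`):
#
#   hf_hub_url(repo_id="org-name/dataset-name", path="filename with blanks.csv", revision="v2")
#   == "https://huggingface.co/datasets/org-name/dataset-name/resolve/v2/filename%20with%20blanks.csv"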
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs


def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
@require_torch
class OptimizationTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)

    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w],
            lr=1e-2,
            eps=(1e-30, 1e-3),
            clip_threshold=1.0,
            decay_rate=-0.8,
            beta1=None,
            weight_decay=0.0,
            relative_step=False,
            scale_parameter=False,
            warmup_init=False,
        )
        for _ in range(1_000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
@require_torch
class ScheduleInitTest(unittest.TestCase):
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)

    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
            get_constant_schedule: ({}, [10.0] * self.num_steps),
            get_constant_schedule_with_warmup: (
                {"num_warmup_steps": 4},
                [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
            ),
            get_linear_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
            ),
            get_cosine_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
            ),
            get_cosine_with_hard_restarts_schedule_with_warmup: (
                {**common_kwargs, "num_cycles": 2},
                [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
            ),
            get_polynomial_decay_schedule_with_warmup: (
                {**common_kwargs, "power": 2.0, "lr_end": 1e-7},
                [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
            ),
            get_inverse_sqrt_schedule: (
                {"num_warmup_steps": 2},
                [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
            ),
        }

        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1,
                expected_learning_rates,
                tol=1e-2,
                msg=f"failed for {scheduler_func} in normal scheduler",
            )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")


class LambdaScheduleWrapper:
    """Pickable version of the lr lambdas, for schedule save/reload tests."""

    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
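# A standalone illustration of the schedule helpers above (a sketch reusing
# this file's imports; stepping the scheduler before the optimizer only
# triggers a PyTorch warning, which is fine for inspection):
#
#   model = nn.Linear(4, 4)
#   optimizer = AdamW(model.parameters(), lr=10.0)
#   scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=2, num_training_steps=10)
#   print(unwrap_schedule(scheduler, num_steps=10))
#   # lr warms up 0.0 -> 5.0 -> 10.0, then decays linearly toward 0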
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]
class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: int,
        parent: Node | None,
    ) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        """Manhattan distance if HEURISTIC == 1, otherwise Euclidean distance."""
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        return self.f_cost < other.f_cost
class AStar:
    def __init__(self, start: TPosition, goal: TPosition):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99_999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self) -> list[TPosition]:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        """Returns a list of successors (both in the grid and free spaces)."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        """Retrace the path from parents to parents until start node."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue

                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))

                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)

        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    bd_path = bidir_astar.search()
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
from maths.prime_check import is_prime
def twin_prime(number: int) -> int:
    """
    Return number + 2 if (number, number + 2) form a twin prime pair, else -1.
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
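# Quick sanity checks (runnable from the repository root, since
# `maths.prime_check` is imported at the top of this file):
#
#   assert twin_prime(3) == 5    # (3, 5) is a twin prime pair
#   assert twin_prime(5) == 7    # (5, 7) is a twin prime pair
#   assert twin_prime(8) == -1   # 8 is not prime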
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250

        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length

        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )

        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))

        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)

        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)

        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)

        self.assertEqual(len(stopping_criteria), 1)
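# Outside the test harness, these criteria plug into generation in the usual
# way (a sketch; `model` and `input_ids` are placeholders, not part of the
# tests above):
#
#   criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20)])
#   output_ids = model.generate(input_ids, stopping_criteria=criteria)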
'''simple docstring'''
from __future__ import annotations
def rec_insertion_sort(collection: list, n: int):
    # Checks if the entire collection has been sorted
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int):
    # Checks the order between adjacent elements
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )

    insert_next(collection, index + 1)


if __name__ == "__main__":
    numbers = input("Enter integers separated by spaces: ")
    number_list: list[int] = [int(num) for num in numbers.split()]
    rec_insertion_sort(number_list, len(number_list))
    print(number_list)
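# A quick non-interactive check of the two functions above:
#
#   data = [5, 3, 1, 4, 2]
#   rec_insertion_sort(data, len(data))
#   assert data == [1, 2, 3, 4, 5]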
'''simple docstring'''
import numpy as np
class Cell:
    """
    Class cell represents a cell in the world, which has the properties:
    position: a tuple of x and y coordinates, initially (0, 0).
    parent: the parent cell visited before arriving at this cell.
    g, h, f: parameters used by the heuristic function.
    """

    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, cell):
        return self.position == cell.position

    def showcell(self):
        print(self.position)
class Gridworld:
    """
    Gridworld class represents the external world here: a grid M*M matrix.
    """

    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self):
        print(self.w)

    def get_neigbours(self, cell):
        """
        Return the in-bounds neighbours of `cell`.
        """
        neughbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neughbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours
def astar(world, start, goal):
    """
    A* search over `world` from the `start` cell to the `goal` cell.
    Returns the path as a list of positions from start to goal.
    """
    _open = []
    _closed = []
    _open.append(start)

    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))

        if current == goal:
            break

        for n in world.get_neigbours(current):
            if n in _closed:
                # already expanded
                continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g

            if any(c == n and c.f < n.f for c in _open):
                # a cheaper copy of this cell is already queued
                continue
            _open.append(n)

    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]
if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f"path from {start.position} to {goal.position}")
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
"""simple docstring"""
import socket
def main():
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12_312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1_024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")


if __name__ == "__main__":
    main()
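# A minimal matching sender for testing on one machine. Hedged: the original
# repository's server script is not shown here; this peer is illustrative only.
#
#   import socket
#
#   def serve_once(filename: str, port: int = 12_312) -> None:
#       with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
#           srv.bind((socket.gethostname(), port))
#           srv.listen(1)
#           conn, _ = srv.accept()
#           with conn, open(filename, "rb") as f:
#               conn.recv(1_024)  # consume the client's greeting
#               while chunk := f.read(1_024):
#                   conn.send(chunk)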
"""simple docstring"""
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_audio_spectrogram_transformer_config(model_name):
    config = ASTConfig()

    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError("Model not supported")

    repo_id = "huggingface/label-files"
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = "speech-commands-v2-id2label.json"
    else:
        config.num_labels = 527
        filename = "audioset-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name):
    if "module.v" in name:
        name = name.replace("module.v", "audio_spectrogram_transformer")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "dist_token" in name:
        name = name.replace("dist_token", "embeddings.distillation_token")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    # transformer blocks
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace("audio_spectrogram_transformer.norm", "audio_spectrogram_transformer.layernorm")
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace("module.mlp_head.0", "classifier.layernorm")
    if "module.mlp_head.1" in name:
        name = name.replace("module.mlp_head.1", "classifier.dense")
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.hidden_size
            if "weight" in key:
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def remove_keys(state_dict):
    ignore_keys = [
        "module.v.head.weight",
        "module.v.head.bias",
        "module.v.head_dist.weight",
        "module.v.head_dist.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak a model's weights to our Audio Spectrogram Transformer structure.
    """
    config = get_audio_spectrogram_transformer_config(model_name)

    model_name_to_url = {
        "ast-finetuned-audioset-10-10-0.4593": (
            "https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.450": (
            "https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448": (
            "https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448-v2": (
            "https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"
        ),
        "ast-finetuned-audioset-12-12-0.447": (
            "https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"
        ),
        "ast-finetuned-audioset-14-14-0.443": (
            "https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"
        ),
        "ast-finetuned-audioset-16-16-0.442": (
            "https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"
        ),
        "ast-finetuned-speech-commands-v2": (
            "https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"
        ),
    }

    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove some keys
    remove_keys(state_dict)
    # rename some keys
    new_state_dict = convert_state_dict(state_dict, config)

    # load 🤗 model
    model = ASTForAudioClassification(config)
    model.eval()

    model.load_state_dict(new_state_dict)

    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2677393 if "speech-commands" not in model_name else -6.845978
    std = 4.5689974 if "speech-commands" not in model_name else 5.5654526
    max_length = 1_024 if "speech-commands" not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean, std=std, max_length=max_length)

    if "speech-commands" in model_name:
        dataset = load_dataset("speech_commands", "v0.02", split="validation")
        waveform = dataset[0]["audio"]["array"]
    else:
        filepath = hf_hub_download(
            repo_id="nielsr/audio-spectogram-transformer-checkpoint",
            filename="sample_audio.flac",
            repo_type="dataset",
        )

        waveform, _ = torchaudio.load(filepath)
        waveform = waveform.squeeze().numpy()

    inputs = feature_extractor(waveform, sampling_rate=16_000, return_tensors="pt")

    # forward pass
    outputs = model(**inputs)
    logits = outputs.logits

    if model_name == "ast-finetuned-audioset-10-10-0.4593":
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602])
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        expected_slice = torch.tensor([-1.1986, -7.0903, -8.2718])
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        expected_slice = torch.tensor([-2.6128, -8.0080, -9.4344])
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        expected_slice = torch.tensor([-1.5080, -7.4534, -8.8917])
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        expected_slice = torch.tensor([-0.5050, -6.5833, -8.0843])
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        expected_slice = torch.tensor([-0.3826, -7.0336, -8.2413])
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        expected_slice = torch.tensor([-1.2113, -6.9101, -8.3470])
    elif model_name == "ast-finetuned-speech-commands-v2":
        expected_slice = torch.tensor([6.1589, -8.0566, -8.7984])
    else:
        raise ValueError("Unknown model name")
    if not torch.allclose(logits[0, :3], expected_slice, atol=1e-4):
        raise ValueError("Logits don't match")
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving feature extractor to {pytorch_dump_folder_path}")
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and feature extractor to the hub...")
        model.push_to_hub(f"MIT/{model_name}")
        feature_extractor.push_to_hub(f"MIT/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="ast-finetuned-audioset-10-10-0.4593",
        type=str,
        help="Name of the Audio Spectrogram Transformer model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
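# Example invocation (the script filename below is illustrative; the model name
# must be one of the checkpoints enumerated in `model_name_to_url` above):
#
#   python convert_ast_checkpoint.py \
#       --model_name ast-finetuned-audioset-10-10-0.4593 \
#       --pytorch_dump_folder_path ./ast-converted \
#       --push_to_hub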
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"
def _read32(bytestream):
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError("Invalid magic number %d in MNIST image file: %s" % (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data
@deprecated(None, 'Please use tf.one_hot on tensors.')
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
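
# A quick worked example for the helper above (a sketch, runnable once this
# module is imported; the output repr assumes numpy's default float dtype):
#
#     >>> _dense_to_one_hot(numpy.array([0, 2, 1]), num_classes=3)
#     array([[1., 0., 0.],
#            [0., 0., 1.],
#            [0., 1., 0.]])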
@deprecated(None, 'Please use tf.data to implement this functionality.')
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract MNIST labels into a 1-D uint8 numpy array [index]."""
    print('Extracting', f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError(
                'Invalid magic number %d in MNIST label file: %s' % (magic, f.name))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels
class _DataSet:
    """Container for an MNIST data set (deprecated upstream, kept as-is here)."""

    @deprecated(
        None, 'Please use alternatives such as official/mnist/_DataSet.py'
        ' from tensorflow/models.', )
    def __init__(self, images, labels, fake_data=False, one_hot=False,
                 dtype=dtypes.float32, reshape=True, seed=None):
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError('Invalid image dtype %r, expected uint8 or float32' % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f'images.shape: {images.shape} labels.shape: {labels.shape}'
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(
                    images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed
    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(None, 'Please write your own downloading logic.')
def _maybe_download(filename, work_directory, source_url):
    """Download the data from `source_url`, unless it is already present locally."""
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print('Successfully downloaded', filename, size, 'bytes.')
    return filepath
@deprecated(None, 'Please use alternatives such as:'
            " tensorflow_datasets.load('mnist')")
def read_data_sets(train_dir, fake_data=False, one_hot=False, dtype=dtypes.float32,
                   reshape=True, validation_size=5000, seed=None,
                   source_url=DEFAULT_SOURCE_URL):
    if fake_data:

        def fake():
            return _DataSet(
                [], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = 'train-images-idx3-ubyte.gz'
    train_labels_file = 'train-labels-idx1-ubyte.gz'
    test_images_file = 't10k-images-idx3-ubyte.gz'
    test_labels_file = 't10k-labels-idx1-ubyte.gz'

    local_file = _maybe_download(
        train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, 'rb') as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(
        train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, 'rb') as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(
        test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, 'rb') as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(
        test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, 'rb') as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = ('Validation size should be between 0 and '
               f'{len(train_images)}. Received: {validation_size}.')
        raise ValueError(msg)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {'dtype': dtype, 'reshape': reshape, 'seed': seed}
    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)
    return _Datasets(train=train, validation=validation, test=test)
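
# A hedged end-to-end sketch of the loader above (the directory path is a
# placeholder, and the first call downloads the files, so network access is
# required):
#
#     datasets = read_data_sets('/tmp/mnist_data', one_hot=True, validation_size=5000)
#     images, labels = datasets.train.next_batch(64)
#     # images.shape -> (64, 784) after the default reshape,
#     # labels.shape -> (64, 10) because one_hot=True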
| 558
|
import d4rl  # noqa
import gym
import tqdm

from diffusers.experimental import ValueGuidedRLPipeline

config = {
    'n_samples': 64,
    'horizon': 32,
    'num_inference_steps': 20,
    'n_guide_steps': 2,  # can set to 0 for faster sampling, does not use value network
    'scale_grad_by_std': True,
    'scale': 0.1,
    'eta': 0.0,
    't_grad_cutoff': 2,
    'device': 'cpu',
}

if __name__ == "__main__":
    env_name = 'hopper-medium-v2'
    env = gym.make(env_name)

    pipeline = ValueGuidedRLPipeline.from_pretrained(
        'bglick13/hopper-medium-v2-value-function-hor32',
        env=env,
    )

    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1000
    rollout = [obs.copy()]
    try:
        for t in tqdm.tqdm(range(T)):
            # call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)

            # execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)

            # update return
            total_reward += reward
            total_score += score
            print(
                f'Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:'
                f' {total_score}'
            )

            # save observations for rendering
            rollout.append(next_observation.copy())

            obs = next_observation
    except KeyboardInterrupt:
        pass

    print(f'Total reward: {total_reward}')
| 558
| 1
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    """Variance-preserving (VP) SDE scheduler, after Song et al.'s score_sde_pytorch."""

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device: Union[str, torch.device] = None):
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute
        dt = -1.0 / len(self.timesteps)

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x

        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
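
# A minimal reverse-diffusion loop sketch for the scheduler above. `score_model`
# is a hypothetical callable returning a score estimate for (x, t); it is not
# part of this file.
#
#     scheduler = ScoreSdeVpScheduler()
#     scheduler.set_timesteps(num_inference_steps=1000)
#     x = torch.randn(4, 3, 32, 32)
#     for t in scheduler.timesteps:
#         score = score_model(x, t)
#         x, x_mean = scheduler.step_pred(score, x, t)
#     # `x_mean` from the final step is the conventional noise-free sample.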
| 49
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__ = logging.get_logger(__name__)
A__ = {
"""andreasmadsen/efficient_mlm_m0.40""": (
"""https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"""
),
}
class RobertaPreLayerNormConfig(PretrainedConfig):
    model_type = 'roberta-prelayernorm'

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type='absolute',
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaPreLayerNormOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ])
| 49
| 1
|
'''simple docstring'''
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """Approximate the arc length of `fnc` between x_start and x_end by summing
    the lengths of `steps` straight chords."""
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0

    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)

        # Increment step
        x1 = x2
        fx1 = fx2

    return length


if __name__ == "__main__":

    def f(x):
        return math.sin(10 * x)

    print('f(x) = sin(10 * x)')
    print('The length of the curve from x = -10 to x = 10 is:')
    i = 10
    while i <= 100000:
        print(f'With {i} steps: {line_length(f, -10, 10, i)}')
        i *= 10
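
# Analytic cross-check (a sketch): for the straight line g(x) = x on [0, 1] the
# exact arc length is sqrt(2), and the piecewise-linear approximation above is
# exact for it at any step count:
#
#     >>> line_length(lambda x: x, 0, 1, 1)
#     1.4142135623730951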
| 444
|
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCAmelCase : Tuple = logging.get_logger(__name__)
lowerCAmelCase : Union[str, Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase : str = {
"""vocab_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/vocab.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/vocab.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/vocab.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/vocab.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/vocab.json""",
},
"""merges_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/merges.txt""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/merges.txt""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/merges.txt""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/merges.txt""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/tokenizer.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/tokenizer.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/tokenizer.json""",
},
}
lowerCAmelCase : str = {
"""gpt2""": 10_24,
"""gpt2-medium""": 10_24,
"""gpt2-large""": 10_24,
"""gpt2-xl""": 10_24,
"""distilgpt2""": 10_24,
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = VOCAB_FILES_NAMES
__magic_name__ = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ = ["input_ids", "attention_mask"]
__magic_name__ = GPTaTokenizer
def __init__( self , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__="<|endoftext|>" , snake_case__="<|endoftext|>" , snake_case__="<|endoftext|>" , snake_case__=False , **snake_case__ , ):
'''simple docstring'''
super().__init__(
snake_case__ , snake_case__ , tokenizer_file=snake_case__ , unk_token=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , add_prefix_space=snake_case__ , **snake_case__ , )
_lowerCAmelCase : str = kwargs.pop('add_bos_token' , snake_case__ )
_lowerCAmelCase : List[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , snake_case__ ) != add_prefix_space:
_lowerCAmelCase : str = getattr(snake_case__ , pre_tok_state.pop('type' ) )
_lowerCAmelCase : List[str] = add_prefix_space
_lowerCAmelCase : Dict = pre_tok_class(**snake_case__ )
_lowerCAmelCase : Any = add_prefix_space
def a ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : int = kwargs.get('is_split_into_words' , snake_case__ )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*snake_case__ , **snake_case__ )
def a ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : int = kwargs.get('is_split_into_words' , snake_case__ )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*snake_case__ , **snake_case__ )
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self._tokenizer.model.save(snake_case__ , name=snake_case__ )
return tuple(snake_case__ )
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Any = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(snake_case__ , add_special_tokens=snake_case__ ) + [self.eos_token_id] )
if len(snake_case__ ) > self.model_max_length:
_lowerCAmelCase : str = input_ids[-self.model_max_length :]
return input_ids
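
# Usage sketch, assuming the class above is exposed under its upstream name
# GPT2TokenizerFast. With add_prefix_space=True, the `is_split_into_words` path
# guarded by the assertions above becomes legal:
#
#     tok = GPT2TokenizerFast.from_pretrained('gpt2', add_prefix_space=True)
#     enc = tok(['Hello', 'world'], is_split_into_words=True)
#     # Without add_prefix_space=True the same call would trip the assertion.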
| 444
| 1
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_snake_case = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_van"] = [
        "VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VanForImageClassification",
        "VanModel",
        "VanPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 658
|
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def column_reshape(input_array: np.ndarray) -> np.ndarray:
    """Reshape a row numpy array into a column numpy array."""
    return input_array.reshape((input_array.size, 1))


def covariance_within_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the covariance matrix inside each class."""
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        data_mean = data.mean(1)
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data, centered_data.T)
    return covariance_sum / features.shape[1]


def covariance_between_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the covariance matrix between multiple classes."""
    general_data_mean = features.mean(1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]
        data_mean = data.mean(1)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T, )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T, )
    return covariance_sum / features.shape[1]


def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
    """Project the dataset onto its first `dimensions` principal components."""
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix)
        # Take all the columns in the reverse order (-1), and then takes only the first
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T, features)
        logging.info("Principal Component Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError


def linear_discriminant_analysis(
    features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int
) -> np.ndarray:
    """Project the dataset onto `dimensions` discriminant axes."""
    assert classes > dimensions
    # Check if features have been already loaded
    if features.any:
        _, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes),
            covariance_within_classes(features, labels, classes), )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info("Linear Discriminant Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError


def test_linear_discriminant_analysis() -> None:
    # Create dummy dataset with 2 classes and 3 features
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2

    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(
            features, labels, classes, dimensions)
        if isinstance(projected_data, np.ndarray):
            raise AssertionError(
                "Did not raise AssertionError for dimensions > classes")
        assert error_info.type is AssertionError


def test_principal_component_analysis() -> None:
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])

    with pytest.raises(AssertionError) as error_info:
        output = principal_component_analysis(features, dimensions)
        if not np.allclose(expected_output, output):
            raise AssertionError
        assert error_info.type is AssertionError


if __name__ == "__main__":
    import doctest

    doctest.testmod()
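
# A small worked PCA example (a sketch): rank-one data carries all of its
# variance on a single axis, so one principal component keeps the full
# structure and the projection has shape (dimensions, num_samples):
#
#     >>> features = np.array([[1.0, 2.0, 3.0], [2.0, 4.0, 6.0]])  # 2 features x 3 samples
#     >>> principal_component_analysis(features, 1).shape
#     (1, 3)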
| 658
| 1
|
def euclidean_gcd(a: int, b: int) -> int:
    """Iterative Euclidean algorithm."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive Euclidean algorithm."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main():
    print(f'euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}')
    print(f'euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}')
    print(f'euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}')
    print(f'euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}')
    print(f'euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}')

    print(f'euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}')
    print(f'euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}')
    print(f'euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}')
    print(f'euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}')
    print(f'euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}')


if __name__ == "__main__":
    main()
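
# Worked trace of the iterative version (a sketch): on (252, 105) the loop
# produces (105, 42) -> (42, 21) -> (21, 0) and returns 21.
#
#     >>> euclidean_gcd(252, 105)
#     21
#     >>> euclidean_gcd_recursive(252, 105)
#     21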
| 85
|
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
SCREAMING_SNAKE_CASE__ : Any = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
SCREAMING_SNAKE_CASE__ : Tuple = json.load(f)
@require_torch
class snake_case ( unittest.TestCase ):
def __lowercase( self : List[str] , a_ : Any )-> str:
"""simple docstring"""
return FSMTTokenizer.from_pretrained(a_ )
def __lowercase( self : int , a_ : Union[str, Any] )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = FSMTForConditionalGeneration.from_pretrained(a_ ).to(a_ )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
['en-ru', 26.0],
['ru-en', 22.0],
['en-de', 22.0],
['de-en', 29.0],
] )
@slow
def __lowercase( self : int , a_ : Optional[int] , a_ : str )-> List[str]:
"""simple docstring"""
# note: this test is not testing the best performance since it only evals a small batch
# but it should be enough to detect a regression in the output quality
SCREAMING_SNAKE_CASE__ : Any = F'''facebook/wmt19-{pair}'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_tokenizer(a_ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_model(a_ )
SCREAMING_SNAKE_CASE__ : int = bleu_data[pair]['src']
SCREAMING_SNAKE_CASE__ : Optional[int] = bleu_data[pair]['tgt']
SCREAMING_SNAKE_CASE__ : Any = tokenizer(a_ , return_tensors='pt' , truncation=a_ , padding='longest' ).to(a_ )
SCREAMING_SNAKE_CASE__ : int = model.generate(
input_ids=batch.input_ids , num_beams=8 , )
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.batch_decode(
a_ , skip_special_tokens=a_ , clean_up_tokenization_spaces=a_ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = calculate_bleu(a_ , a_ )
print(a_ )
self.assertGreaterEqual(scores['bleu'] , a_ )
| 85
| 1
|
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1_000_000) -> int:
    """Find the numerator of the fraction immediately left of numerator/denominator
    among all fractions with denominators up to `limit`."""
    max_numerator = 0
    max_denominator = 1

    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator


if __name__ == "__main__":
    print(solution(numerator=3, denominator=7, limit=1_000_000))
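
# Spot check against the classic Farey-sequence example (a sketch): for
# denominators up to 8 the fraction immediately to the left of 3/7 is 2/5,
# so the search above returns the numerator 2.
#
#     >>> solution(numerator=3, denominator=7, limit=8)
#     2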
| 716
|
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=4 , ):
lowerCAmelCase_ = parent
lowerCAmelCase_ = batch_size
lowerCAmelCase_ = seq_length
lowerCAmelCase_ = is_training
lowerCAmelCase_ = use_attention_mask
lowerCAmelCase_ = use_token_type_ids
lowerCAmelCase_ = use_labels
lowerCAmelCase_ = vocab_size
lowerCAmelCase_ = hidden_size
lowerCAmelCase_ = num_hidden_layers
lowerCAmelCase_ = num_attention_heads
lowerCAmelCase_ = intermediate_size
lowerCAmelCase_ = hidden_act
lowerCAmelCase_ = hidden_dropout_prob
lowerCAmelCase_ = attention_probs_dropout_prob
lowerCAmelCase_ = max_position_embeddings
lowerCAmelCase_ = type_vocab_size
lowerCAmelCase_ = type_sequence_label_size
lowerCAmelCase_ = initializer_range
lowerCAmelCase_ = num_choices
def lowercase__ ( self):
lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
lowerCAmelCase_ = None
if self.use_attention_mask:
lowerCAmelCase_ = random_attention_mask([self.batch_size, self.seq_length])
lowerCAmelCase_ = None
if self.use_token_type_ids:
lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
lowerCAmelCase_ = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowercase__ ( self):
lowerCAmelCase_ = self.prepare_config_and_inputs()
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = config_and_inputs
lowerCAmelCase_ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def lowercase__ ( self):
lowerCAmelCase_ = self.prepare_config_and_inputs()
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = config_and_inputs
lowerCAmelCase_ = True
lowerCAmelCase_ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class UpperCamelCase_ ( A , unittest.TestCase ):
'''simple docstring'''
a :Optional[Any] = True
a :Optional[Any] = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowercase__ ( self):
lowerCAmelCase_ = FlaxRobertaModelTester(self)
@slow
def lowercase__ ( self):
for model_class_name in self.all_model_classes:
lowerCAmelCase_ = model_class_name.from_pretrained('''roberta-base''' , from_pt=_UpperCAmelCase)
lowerCAmelCase_ = model(np.ones((1, 1)))
self.assertIsNotNone(_UpperCAmelCase)
| 413
| 0
|
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Find all the valid positions a knight can move to from the current position."""
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []

    for inner_position in positions:
        y_test, x_test = inner_position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(inner_position)

    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    """Check whether the board has been completely filled with non-zero values."""
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """Helper function to solve the knight tour problem by backtracking."""
    if is_complete(board):
        return True

    for position in get_valid_pos(pos, len(board)):
        y, x = position

        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0

    return False


def open_knight_tour(n: int) -> list[list[int]]:
    """Find a solution for the knight tour problem on a board of size n."""
    board = [[0 for i in range(n)] for j in range(n)]

    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0

    msg = f'Open Knight Tour cannot be performed on a board of size {n}'
    raise ValueError(msg)
if __name__ == "__main__":
import doctest
doctest.testmod()
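
# Usage sketch: the trivial 1x1 board is already a complete tour, and larger
# boards that admit an open tour (e.g. 5x5) come back numbered 1..n*n with the
# move order of the knight.
#
#     >>> open_knight_tour(1)
#     [[1]]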
| 303
|
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class CustomTokenizerFast(BertTokenizerFast):
    """Minimal fast tokenizer that reuses the custom slow tokenizer.

    The class name is inferred from the module layout; the upstream file does not
    appear in this snippet."""

    slow_tokenizer_class = CustomTokenizer
    pass
| 303
| 1
|
def solution(n: int = 100) -> int:
    """Return the number of distinct terms a**b for 2 <= a <= n and 2 <= b <= n."""
    collect_powers = set()

    n = n + 1  # maximum limit

    for a in range(2, n):
        for b in range(2, n):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)
if __name__ == "__main__":
print('Number of terms ', solution(int(str(input()).strip())))
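
# Spot check (a sketch): for n = 5 the terms a**b with 2 <= a, b <= 5 give 16
# products, but 4**2 collides with 2**4, leaving 15 distinct values.
#
#     >>> solution(5)
#     15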
| 46
|
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
A = logging.getLogger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
__A = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
__A = field(
default=__snake_case , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
__A = field(
default=__snake_case , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
__A = field(
default=__snake_case , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
__A = field(
default=__snake_case , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
__A = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
__A = field(
default=__snake_case , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
@dataclass
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
__A = field(default=__snake_case , metadata={"""help""": """The input training data file (a text file)."""} )
__A = field(
default=__snake_case , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
__A = field(
default=__snake_case , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
__A = field(
default=__snake_case , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
__A = field(
default=__snake_case , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. If passed, sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
__A = field(
default=__snake_case , metadata={
"""help""": (
"""Whether to pad all samples to the maximum sentence length. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch. More """
"""efficient on GPU but very bad for TPU."""
)
} , )
__A = field(
default=__snake_case , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
__A = field(
default=__snake_case , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
def __lowerCAmelCase ( self ):
"""simple docstring"""
if self.train_file is not None:
snake_case_ = self.train_file.split('.' )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
snake_case_ = self.validation_file.split('.' )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
__A = 42
__A = True
__A = None
__A = None
def __call__( self , __UpperCamelCase ):
"""simple docstring"""
snake_case_ = 'label' if 'label' in features[0].keys() else 'labels'
snake_case_ = [feature.pop(__UpperCamelCase ) for feature in features]
snake_case_ = len(__UpperCamelCase )
snake_case_ = len(features[0]['input_ids'] )
snake_case_ = [
[{k: v[i] for k, v in feature.items()} for i in range(__UpperCamelCase )] for feature in features
]
snake_case_ = list(chain(*__UpperCamelCase ) )
snake_case_ = self.tokenizer.pad(
__UpperCamelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , )
# Un-flatten
snake_case_ = {k: v.view(__UpperCamelCase , __UpperCamelCase , -1 ) for k, v in batch.items()}
# Add back labels
snake_case_ = torch.tensor(__UpperCamelCase , dtype=torch.intaa )
return batch
def a():
'''simple docstring'''
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
snake_case_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
snake_case_ , snake_case_ , snake_case_ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
snake_case_ , snake_case_ , snake_case_ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_swag' , lowercase__ , lowercase__ )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
snake_case_ = training_args.get_process_log_level()
logger.setLevel(lowercase__ )
datasets.utils.logging.set_verbosity(lowercase__ )
transformers.utils.logging.set_verbosity(lowercase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
snake_case_ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
snake_case_ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
snake_case_ = {}
if data_args.train_file is not None:
snake_case_ = data_args.train_file
if data_args.validation_file is not None:
snake_case_ = data_args.validation_file
snake_case_ = data_args.train_file.split('.' )[-1]
snake_case_ = load_dataset(
lowercase__ , data_files=lowercase__ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
snake_case_ = load_dataset(
'swag' , 'regular' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
snake_case_ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
snake_case_ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
snake_case_ = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=lowercase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
snake_case_ = [f"""ending{i}""" for i in range(4 )]
snake_case_ = 'sent1'
snake_case_ = 'sent2'
if data_args.max_seq_length is None:
snake_case_ = tokenizer.model_max_length
if max_seq_length > 1024:
logger.warning(
'The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'
' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'
' override this default with `--block_size xxx`.' )
snake_case_ = 1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
snake_case_ = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(lowercase__ ):
snake_case_ = [[context] * 4 for context in examples[context_name]]
snake_case_ = examples[question_header_name]
snake_case_ = [
[f"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(lowercase__ )
]
# Flatten out
snake_case_ = list(chain(*lowercase__ ) )
snake_case_ = list(chain(*lowercase__ ) )
# Tokenize
snake_case_ = tokenizer(
lowercase__ , lowercase__ , truncation=lowercase__ , max_length=lowercase__ , padding='max_length' if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(lowercase__ ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('--do_train requires a train dataset' )
snake_case_ = raw_datasets['train']
if data_args.max_train_samples is not None:
snake_case_ = min(len(lowercase__ ) , data_args.max_train_samples )
snake_case_ = train_dataset.select(range(lowercase__ ) )
with training_args.main_process_first(desc='train dataset map pre-processing' ):
snake_case_ = train_dataset.map(
lowercase__ , batched=lowercase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError('--do_eval requires a validation dataset' )
snake_case_ = raw_datasets['validation']
if data_args.max_eval_samples is not None:
snake_case_ = min(len(lowercase__ ) , data_args.max_eval_samples )
snake_case_ = eval_dataset.select(range(lowercase__ ) )
with training_args.main_process_first(desc='validation dataset map pre-processing' ):
snake_case_ = eval_dataset.map(
lowercase__ , batched=lowercase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
snake_case_ = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=lowercase__ , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(lowercase__ ):
snake_case_ , snake_case_ = eval_predictions
snake_case_ = np.argmax(lowercase__ , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
snake_case_ = Trainer(
model=lowercase__ , args=lowercase__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=lowercase__ , data_collator=lowercase__ , compute_metrics=lowercase__ , )
# Training
if training_args.do_train:
snake_case_ = None
if training_args.resume_from_checkpoint is not None:
snake_case_ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
snake_case_ = last_checkpoint
snake_case_ = trainer.train(resume_from_checkpoint=lowercase__ )
trainer.save_model() # Saves the tokenizer too for easy upload
snake_case_ = train_result.metrics
snake_case_ = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowercase__ )
)
snake_case_ = min(lowercase__ , len(lowercase__ ) )
trainer.log_metrics('train' , lowercase__ )
trainer.save_metrics('train' , lowercase__ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
snake_case_ = trainer.evaluate()
snake_case_ = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowercase__ )
snake_case_ = min(lowercase__ , len(lowercase__ ) )
trainer.log_metrics('eval' , lowercase__ )
trainer.save_metrics('eval' , lowercase__ )
snake_case_ = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'multiple-choice',
'dataset_tags': 'swag',
'dataset_args': 'regular',
'dataset': 'SWAG',
'language': 'en',
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowercase__ )
else:
trainer.create_model_card(**lowercase__ )
def a(lowercase__ ):
'''simple docstring'''
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
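
# A hedged invocation sketch (paths and the model identifier are placeholders,
# not values taken from this script):
#
#     python run_swag.py \
#         --model_name_or_path bert-base-uncased \
#         --do_train --do_eval \
#         --per_device_train_batch_size 8 \
#         --max_seq_length 384 \
#         --output_dir /tmp/swag_out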
| 46
| 1
|
'''simple docstring'''
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_lowercase = '\\n@inproceedings{lin-2004-rouge,\n title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",\n author = "Lin, Chin-Yew",\n booktitle = "Text Summarization Branches Out",\n month = jul,\n year = "2004",\n address = "Barcelona, Spain",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W04-1013",\n pages = "74--81",\n}\n'
_lowercase = '\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n'
_lowercase = '\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,\n `"rougeL"`: Longest common subsequence based scoring.\n `"rougeLSum"`: rougeLsum splits text using `"\n"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric(\'rouge\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']\n >>> print(results["rouge1"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results["rouge1"].mid.fmeasure)\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowercase ( datasets.Metric ):
def UpperCamelCase ( self ) -> Union[str, Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/google-research/google-research/tree/master/rouge'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/ROUGE_(metric)''',
'''https://github.com/google-research/google-research/tree/master/rouge''',
] , )
def UpperCamelCase ( self , A__ , A__ , A__=None , A__=True , A__=False ) -> List[Any]:
if rouge_types is None:
snake_case = ['''rouge1''', '''rouge2''', '''rougeL''', '''rougeLsum''']
snake_case = rouge_scorer.RougeScorer(rouge_types=A__ , use_stemmer=A__ )
if use_aggregator:
snake_case = scoring.BootstrapAggregator()
else:
snake_case = []
for ref, pred in zip(A__ , A__ ):
snake_case = scorer.score(A__ , A__ )
if use_aggregator:
aggregator.add_scores(A__ )
else:
scores.append(A__ )
if use_aggregator:
snake_case = aggregator.aggregate()
else:
snake_case = {}
for key in scores[0]:
snake_case = [score[key] for score in scores]
return result
| 342
|
'''simple docstring'''
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int

for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    """Return a set of products, one per unique partition of the number into primes."""
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int

    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret


def solution(number_unique_partitions: int = 5000) -> int | None:
    """Return the smallest integer that can be written as a sum of primes in more
    than `number_unique_partitions` ways."""
    for number_to_partition in range(1, number_unique_partitions):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None
if __name__ == "__main__":
print(f'{solution() = }')
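
# Worked example (a sketch): the sums of primes equal to 7 are 7, 2 + 5 and
# 2 + 2 + 3, which the product encoding above stores as {7, 10, 12}:
#
#     >>> sorted(partition(7))
#     [7, 10, 12]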
| 342
| 1
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _snake_case ( metaclass=__snake_case ):
"""simple docstring"""
a = ["flax", "transformers"]
def __init__( self : Tuple , *_A : int , **_A : Dict):
"""simple docstring"""
requires_backends(self , ["""flax""", """transformers"""])
@classmethod
def _lowerCAmelCase ( cls : Optional[int] , *_A : Optional[int] , **_A : List[str]):
"""simple docstring"""
requires_backends(cls , ["""flax""", """transformers"""])
@classmethod
def _lowerCAmelCase ( cls : int , *_A : Union[str, Any] , **_A : Any):
"""simple docstring"""
requires_backends(cls , ["""flax""", """transformers"""])
class _snake_case ( metaclass=__snake_case ):
"""simple docstring"""
a = ["flax", "transformers"]
def __init__( self : str , *_A : str , **_A : Optional[Any]):
"""simple docstring"""
requires_backends(self , ["""flax""", """transformers"""])
@classmethod
def _lowerCAmelCase ( cls : Union[str, Any] , *_A : Any , **_A : List[str]):
"""simple docstring"""
requires_backends(cls , ["""flax""", """transformers"""])
@classmethod
def _lowerCAmelCase ( cls : Tuple , *_A : str , **_A : List[Any]):
"""simple docstring"""
requires_backends(cls , ["""flax""", """transformers"""])
class _snake_case ( metaclass=__snake_case ):
"""simple docstring"""
a = ["flax", "transformers"]
def __init__( self : List[Any] , *_A : Tuple , **_A : Union[str, Any]):
"""simple docstring"""
requires_backends(self , ["""flax""", """transformers"""])
@classmethod
def _lowerCAmelCase ( cls : Optional[Any] , *_A : int , **_A : str):
"""simple docstring"""
requires_backends(cls , ["""flax""", """transformers"""])
@classmethod
def _lowerCAmelCase ( cls : Dict , *_A : List[str] , **_A : Dict):
"""simple docstring"""
requires_backends(cls , ["""flax""", """transformers"""])
class _snake_case ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ["flax", "transformers"]

    def __init__( self , *args , **kwargs):
        """simple docstring"""
        requires_backends(self , ["""flax""", """transformers"""])

    @classmethod
    def from_config( cls , *args , **kwargs):
        """simple docstring"""
        requires_backends(cls , ["""flax""", """transformers"""])

    @classmethod
    def from_pretrained( cls , *args , **kwargs):
        """simple docstring"""
        requires_backends(cls , ["""flax""", """transformers"""])
| 635
|
"""simple docstring"""
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(task , reset_position_index_per_cell , tf_checkpoint_path , tapas_config_file , pytorch_dump_path ):
    # Initialise PyTorch model.
    # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
    # TapasConfig to False.

    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file )
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config )
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True

        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513

        model = TapasForQuestionAnswering(config=config )
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False

        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141

        model = TapasForQuestionAnswering(config=config )
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config )
    elif task == "MLM":
        model = TapasForMaskedLM(config=config )
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config )
    else:
        raise ValueError(F"""Task {task} not supported.""" )

    print(F"""Building PyTorch model from configuration: {config}""" )
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model , config , tf_checkpoint_path )

    # Save pytorch-model (weights and configuration)
    print(F"""Save PyTorch model to {pytorch_dump_path}""" )
    model.save_pretrained(pytorch_dump_path )

    # Save tokenizer files
    print(F"""Save tokenizer files to {pytorch_dump_path}""" )
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""" , model_max_length=512 )
    tokenizer.save_pretrained(pytorch_dump_path )
    print("""Used relative position embeddings:""" , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--task''', default='''SQA''', type=str, help='''Model task for which to convert a checkpoint. Defaults to SQA.'''
)
parser.add_argument(
'''--reset_position_index_per_cell''',
default=False,
action='''store_true''',
        help='''Whether to use relative position embeddings or not. Defaults to False.''',
)
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--tapas_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained TAPAS model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCAmelCase_ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
| 635
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'''processing_layoutxlm''': ['''LayoutXLMProcessor''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_layoutxlm'''] = ['''LayoutXLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_layoutxlm_fast'''] = ['''LayoutXLMTokenizerFast''']
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 9
|
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
UpperCamelCase_ : List[Any] = 3
def primitive_root(p_val: int ) -> int:
    '''simple docstring'''
    print("Generating primitive root of p" )
    while True:
        g = random.randrange(3 , p_val )
        if pow(g , 2 , p_val ) == 1:
            continue
        if pow(g , p_val , p_val ) == 1:
            continue
        return g
def generate_key(key_size: int ) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    '''simple docstring'''
    print("Generating prime p..." )
    p = rabin_miller.generate_large_prime(key_size )  # select large prime number.
    e_1 = primitive_root(p )  # one primitive root on modulo p.
    d = random.randrange(3 , p )  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1 , d , p ) , p )

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key
def make_key_files(name: str , key_size: int ) -> None:
'''simple docstring'''
if os.path.exists(f"""{name}_pubkey.txt""" ) or os.path.exists(f"""{name}_privkey.txt""" ):
print("\nWARNING:" )
print(
f"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
"Use a different name or delete these files and re-run this program." )
sys.exit()
    public_key , private_key = generate_key(key_size )
print(f"""\nWriting public key to file {name}_pubkey.txt...""" )
with open(f"""{name}_pubkey.txt""" , "w" ) as fo:
fo.write(f"""{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}""" )
print(f"""Writing private key to file {name}_privkey.txt...""" )
with open(f"""{name}_privkey.txt""" , "w" ) as fo:
fo.write(f"""{private_key[0]},{private_key[1]}""" )
def main() -> None:
'''simple docstring'''
print("Making key files..." )
make_key_files("elgamal" , 2048 )
print("Key files generation successful" )
if __name__ == "__main__":
main()
| 461
| 0
|
from __future__ import annotations
import bisect
def bisect_left(sorted_collection: list[int] , item: int , lo: int = 0 , hi: int = -1 ) -> int:
    if hi < 0:
        hi = len(sorted_collection )

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid

    return lo
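# Quick sanity check for bisect_left (illustrative comment only):
#   bisect_left([0, 5, 7, 10, 15], 6)  -> 2   (insertion point before 7)
#   bisect_left([0, 5, 7, 10, 15], 5)  -> 1   (leftmost slot for duplicates)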
def bisect_right(sorted_collection: list[int] , item: int , lo: int = 0 , hi: int = -1 ) -> int:
    if hi < 0:
        hi = len(sorted_collection )

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid

    return lo
def insort_left(sorted_collection: list[int] , item: int , lo: int = 0 , hi: int = -1 ) -> None:
    sorted_collection.insert(bisect_left(sorted_collection , item , lo , hi ) , item )
def insort_right(sorted_collection: list[int] , item: int , lo: int = 0 , hi: int = -1 ) -> None:
    sorted_collection.insert(bisect_right(sorted_collection , item , lo , hi ) , item )
def binary_search(sorted_collection: list[int] , item: int ) -> int | None:
    left = 0
    right = len(sorted_collection ) - 1

    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None
def binary_search_std_lib(sorted_collection: list[int] , item: int ) -> int | None:
    index = bisect.bisect_left(sorted_collection , item )
    if index != len(sorted_collection ) and sorted_collection[index] == item:
        return index
    return None
def binary_search_by_recursion(sorted_collection: list[int] , item: int , left: int , right: int ) -> int | None:
    if right < left:
        return None

    midpoint = left + (right - left) // 2

    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection , item , left , midpoint - 1 )
    else:
        return binary_search_by_recursion(sorted_collection , item , midpoint + 1 , right )
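# All three variants agree; e.g. (illustrative comment only):
#   collection = [0, 5, 7, 10, 15]
#   binary_search(collection, 10)                                       -> 3
#   binary_search_std_lib(collection, 10)                               -> 3
#   binary_search_by_recursion(collection, 10, 0, len(collection) - 1)  -> 3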
if __name__ == "__main__":
    user_input = input('Enter numbers separated by comma:\n').strip()
    collection = sorted(int(item) for item in user_input.split(','))
    target = int(input('Enter a single number to be found in the list:\n'))
    result = binary_search(collection, target)
if result is None:
print(F'{target} was not found in {collection}.')
else:
print(F'{target} was found at position {result} in {collection}.')
| 701
|
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path , rembert_config_file , pytorch_dump_path ):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file )
    print('''Building PyTorch model from configuration: {}'''.format(str(config ) ) )
    model = RemBertModel(config )

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model , config , tf_checkpoint_path )

    # Save pytorch-model
    print('''Save PyTorch model to {}'''.format(pytorch_dump_path ) )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--rembert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained RemBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
SCREAMING_SNAKE_CASE = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 186
| 0
|
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
__SCREAMING_SNAKE_CASE : int = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""feature request""",
"""new model""",
"""wip""",
]
def main():
    """simple docstring"""
    g = Github(os.environ["GITHUB_TOKEN"] )
    repo = g.get_repo("huggingface/transformers" )
    open_issues = repo.get_issues(state="open" )

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()] , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="closed" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
if __name__ == "__main__":
main()
| 244
|
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
__SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
class SchedulerType(Enum ):
    '''simple docstring'''

    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"
def get_constant_schedule(optimizer: Optimizer , last_epoch: int = -1 ):
    """simple docstring"""
    return LambdaLR(optimizer , lambda _ : 1 , last_epoch=last_epoch )
def get_constant_schedule_with_warmup(optimizer: Optimizer , num_warmup_steps: int , last_epoch: int = -1 ):
    """simple docstring"""

    def lr_lambda(current_step: int ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1.0 , num_warmup_steps ) )
        return 1.0

    return LambdaLR(optimizer , lr_lambda , last_epoch=last_epoch )
def get_piecewise_constant_schedule(optimizer: Optimizer , step_rules: str , last_epoch: int = -1 ):
    """simple docstring"""
    rules_dict = {}
    rule_list = step_rules.split("," )
    for rule_str in rule_list[:-1]:
        steps_str , value_str = rule_str.split(":" )
        steps = int(steps_str )
        value = float(value_str )
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1] )

    def create_rules_function(rules_dict , last_lr_multiple ):
        def rule_func(steps: int ) -> float:
            sorted_steps = sorted(rules_dict.keys() )
            for i, sorted_step in enumerate(sorted_steps ):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict , last_lr_multiple )

    return LambdaLR(optimizer , rules_func , last_epoch=last_epoch )
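# The `step_rules` string format (an explanatory comment inferred from the
# parsing above): "step:multiplier,...,final_multiplier". For example,
# "1:0.1,10:0.5,1.0" applies a 0.1x multiplier before step 1, 0.5x before
# step 10, and 1.0x for every step afterwards.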
def get_linear_schedule_with_warmup(optimizer , num_warmup_steps , num_training_steps , last_epoch=-1 ):
    """simple docstring"""

    def lr_lambda(current_step: int ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        return max(
            0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )

    return LambdaLR(optimizer , lr_lambda , last_epoch )
def get_cosine_schedule_with_warmup(optimizer: Optimizer , num_warmup_steps: int , num_training_steps: int , num_cycles: float = 0.5 , last_epoch: int = -1 ):
    """simple docstring"""

    def lr_lambda(current_step: int ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        progress = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
        return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(num_cycles ) * 2.0 * progress )) )

    return LambdaLR(optimizer , lr_lambda , last_epoch )
def get_cosine_with_hard_restarts_schedule_with_warmup(optimizer: Optimizer , num_warmup_steps: int , num_training_steps: int , num_cycles: int = 1 , last_epoch: int = -1 ):
    """simple docstring"""

    def lr_lambda(current_step: int ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        progress = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
        if progress >= 1.0:
            return 0.0
        return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles ) * progress) % 1.0) )) )

    return LambdaLR(optimizer , lr_lambda , last_epoch )
def get_polynomial_decay_schedule_with_warmup(optimizer , num_warmup_steps , num_training_steps , lr_end=1e-7 , power=1.0 , last_epoch=-1 ):
    """simple docstring"""
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(F"""lr_end ({lr_end}) must be smaller than initial lr ({lr_init})""" )

    def lr_lambda(current_step: int ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer , lr_lambda , last_epoch )
__SCREAMING_SNAKE_CASE : int = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(name: Union[str, SchedulerType] , optimizer: Optimizer , step_rules: Optional[str] = None , num_warmup_steps: Optional[int] = None , num_training_steps: Optional[int] = None , num_cycles: int = 1 , power: float = 1.0 , last_epoch: int = -1 , ):
    """simple docstring"""
    name = SchedulerType(name )
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer , last_epoch=last_epoch )

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer , step_rules=step_rules , last_epoch=last_epoch )

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(F"""{name} requires `num_warmup_steps`, please provide that argument.""" )

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer , num_warmup_steps=num_warmup_steps , last_epoch=last_epoch )

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(F"""{name} requires `num_training_steps`, please provide that argument.""" )

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , num_cycles=num_cycles , last_epoch=last_epoch , )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , power=power , last_epoch=last_epoch , )

    return schedule_func(
        optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , last_epoch=last_epoch )
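# Example usage (a minimal sketch; `model` is a placeholder for any
# torch.nn.Module and the step counts are illustrative assumptions):
#
#   optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
#   scheduler = get_scheduler(
#       "cosine", optimizer, num_warmup_steps=500, num_training_steps=10_000
#   )
#   for step in range(10_000):
#       ...  # forward/backward + optimizer.step()
#       scheduler.step()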
| 244
| 1
|
from __future__ import annotations
from typing import Generic, TypeVar
snake_case = TypeVar("T")
class DisjointSetTreeNode(Generic[T] ):
    '''simple docstring'''

    def __init__( self , data ):
        self.data = data
        self.parent = self
        self.rank = 0
class DisjointSetTree(Generic[T] ):
    '''simple docstring'''

    def __init__( self ):
        # map from node name to the node object
        self.map: dict[T, DisjointSetTreeNode[T]] = {}

    def make_set( self , data ):
        # create a new set with x as its member
        self.map[data] = DisjointSetTreeNode(data )

    def find_set( self , data ):
        # find the set x belongs to (with path-compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data )
        return elem_ref.parent

    def link( self , nodea , nodeb ):
        # helper function for union operation
        if nodea.rank > nodeb.rank:
            nodeb.parent = nodea
        else:
            nodea.parent = nodeb
            if nodea.rank == nodeb.rank:
                nodeb.rank += 1

    def union( self , dataa , datab ):
        # merge 2 disjoint sets
        self.link(self.find_set(dataa ) , self.find_set(datab ) )
class GraphUndirectedWeighted(Generic[T] ):
    '''simple docstring'''

    def __init__( self ):
        # connections: map from the node to the neighbouring nodes (with weights)
        self.connections: dict[T, dict[T, int]] = {}

    def add_node( self , node ):
        # add a node ONLY if it's not present in the graph
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge( self , nodea , nodeb , weight ):
        # add an edge with the given weight
        self.add_node(nodea )
        self.add_node(nodeb )
        self.connections[nodea][nodeb] = weight
        self.connections[nodeb][nodea] = weight

    def kruskal( self ):
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start) )
                    edges.append((start, end, self.connections[start][end]) )
        edges.sort(key=lambda x : x[2] )

        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node )

        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections ) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u )
            parent_v = disjoint_set.find_set(v )
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u , v , w )
                disjoint_set.union(u , v )
        return graph
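# Example usage (an illustrative comment; node labels are arbitrary):
#
#   g = GraphUndirectedWeighted[int]()
#   g.add_edge(1, 2, 1)
#   g.add_edge(2, 3, 2)
#   g.add_edge(1, 3, 10)
#   mst = g.kruskal()
#   # mst.connections now holds only the edges (1-2, w=1) and (2-3, w=2)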
| 587
|
import argparse
import torch
from torch import nn
from transformers import Speech2TextConfig, Speech2TextForConditionalGeneration
def remove_ignore_keys_(state_dict ):
    """simple docstring"""
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_keys(s_dict ):
    """simple docstring"""
    keys = list(s_dict.keys() )
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace("transformer_layers" , "layers" )] = s_dict.pop(key )
        elif "subsample" in key:
            s_dict[key.replace("subsample" , "conv" )] = s_dict.pop(key )
def make_linear_from_emb(emb ):
    """simple docstring"""
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_sat_checkpoint_to_tfms(checkpoint_path , pytorch_dump_folder_path ):
    """simple docstring"""
    mam_aaa = torch.load(checkpoint_path , map_location="cpu" )
    args = mam_aaa["args"]
    state_dict = mam_aaa["model"]
    lm_head_weights = state_dict["decoder.output_projection.weight"]

    remove_ignore_keys_(state_dict )
    rename_keys(state_dict )

    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    tie_embeds = args.share_decoder_input_output_embed

    conv_kernel_sizes = [int(i ) for i in args.conv_kernel_sizes.split("," )]
    config = Speech2TextConfig(
        vocab_size=vocab_size , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="relu" , num_conv_layers=len(conv_kernel_sizes ) , conv_channels=args.conv_channels , conv_kernel_sizes=conv_kernel_sizes , input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=tie_embeds , num_beams=5 , max_length=200 , use_cache=True , decoder_start_token_id=2 , early_stopping=True , )

    model = Speech2TextForConditionalGeneration(config )
    missing , unexpected = model.model.load_state_dict(state_dict , strict=False )
    if len(missing ) > 0 and not set(missing ) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f""" but all the following weights are missing {missing}""" )

    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens )
    else:
        model.lm_head.weight.data = lm_head_weights

    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--fairseq_path", type=str, help="Path to the fairseq model (.pt) file.")
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
snake_case = parser.parse_args()
convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
| 587
| 1
|
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple(max_perimeter: int ) -> typing.Counter[int]:
    triplets: typing.Counter[int] = Counter()
    for base in range(1 , max_perimeter + 1):
        for perpendicular in range(base , max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets
def solution(max_perimeter: int = 1000) -> int:
    triplets = pythagorean_triple(max_perimeter)
    return triplets.most_common(1)[0][0]
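# Illustrative check (comment only): pythagorean_triple(120) counts perimeter
# 120 three times -- (30, 40, 50), (20, 48, 52) and (24, 45, 51) -- which is
# why Project Euler problem 39 reports p = 120 as an early maximum.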
if __name__ == "__main__":
print(F'''Perimeter {solution()} has maximum solutions''')
| 25
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {'configuration_van': ['VAN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VanConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_van'] = [
'VAN_PRETRAINED_MODEL_ARCHIVE_LIST',
'VanForImageClassification',
'VanModel',
'VanPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 25
| 1
|
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput( BaseOutput ):
    predicted_image_embedding: torch.FloatTensor


class PriorTransformer( ModelMixin , ConfigMixin ):
@register_to_config
    def __init__( self , num_attention_heads: int = 32 , attention_head_dim: int = 64 , num_layers: int = 20 , embedding_dim: int = 768 , num_embeddings=77 , additional_embeddings=4 , dropout: float = 0.0 , time_embed_act_fn: str = "silu" , norm_in_type: Optional[str] = None , embedding_proj_norm_type: Optional[str] = None , encoder_hid_proj_type: Optional[str] = "linear" , added_emb_type: Optional[str] = "prd" , time_embed_dim: Optional[int] = None , embedding_proj_dim: Optional[int] = None , clip_embed_dim: Optional[int] = None , ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings

        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim

        self.time_proj = Timesteps(inner_dim , True , 0)
        self.time_embedding = TimestepEmbedding(inner_dim , time_embed_dim , out_dim=inner_dim , act_fn=time_embed_act_fn)

        self.proj_in = nn.Linear(embedding_dim , inner_dim)

        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim)
        else:
            raise ValueError(F"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""")

        self.embedding_proj = nn.Linear(embedding_proj_dim , inner_dim)

        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim , inner_dim)
        else:
            raise ValueError(F"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""")

        self.positional_embedding = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , inner_dim))

        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1 , 1 , inner_dim))
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                F"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""")

        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim , num_attention_heads , attention_head_dim , dropout=dropout , activation_fn="""gelu""" , attention_bias=True , )
                for d in range(num_layers)
            ])

        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim)
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(F"""Unsupported norm_in_type: {norm_in_type}.""")

        self.norm_out = nn.LayerNorm(inner_dim)

        self.proj_to_clip_embeddings = nn.Linear(inner_dim , clip_embed_dim)

        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -10000.0)
        causal_attention_mask.triu_(1)
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer("""causal_attention_mask""" , causal_attention_mask , persistent=False)

        self.clip_mean = nn.Parameter(torch.zeros(1 , clip_embed_dim))
        self.clip_std = nn.Parameter(torch.zeros(1 , clip_embed_dim))
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors( self )-> Dict[str, AttentionProcessor]:
        processors = {}

        def fn_recursive_add_processors(name: str , module: torch.nn.Module , processors: Dict[str, AttentionProcessor]):
            if hasattr(module , """set_processor"""):
                processors[F"""{name}.processor"""] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(F"""{name}.{sub_name}""" , child , processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name , module , processors)

        return processors
    def set_attn_processor( self , processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        count = len(self.attn_processors.keys())

        if isinstance(processor , dict) and len(processor) != count:
            raise ValueError(
                F"""A dict of processors was passed, but the number of processors {len(processor)} does not match the"""
                F""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""")

        def fn_recursive_attn_processor(name: str , module: torch.nn.Module , processor):
            if hasattr(module , """set_processor"""):
                if not isinstance(processor , dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(F"""{name}.processor"""))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(F"""{name}.{sub_name}""" , child , processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name , module , processor)
    def set_default_attn_processor( self ):
        self.set_attn_processor(AttnProcessor())
    def forward( self , hidden_states , timestep: Union[torch.Tensor, float, int] , proj_embedding: torch.FloatTensor , encoder_hidden_states: Optional[torch.FloatTensor] = None , attention_mask: Optional[torch.BoolTensor] = None , return_dict: bool = True , ):
        batch_size = hidden_states.shape[0]

        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(hidden_states.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size , dtype=timesteps.dtype , device=timesteps.device)

        timesteps_projected = self.time_proj(timesteps)

        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype)
        time_embeddings = self.time_embedding(timesteps_projected)

        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding)

        proj_embeddings = self.embedding_proj(proj_embedding)
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError("""`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set""")

        hidden_states = self.proj_in(hidden_states)

        positional_embeddings = self.positional_embedding.to(hidden_states.dtype)

        additional_embeds = []
        additional_embeddings_len = 0

        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states)
            additional_embeddings_len += encoder_hidden_states.shape[1]

        if len(proj_embeddings.shape) == 2:
            proj_embeddings = proj_embeddings[:, None, :]

        if len(hidden_states.shape) == 2:
            hidden_states = hidden_states[:, None, :]

        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]

        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size , -1 , -1)
            additional_embeds.append(prd_embedding)

        hidden_states = torch.cat(
            additional_embeds , dim=1 , )

        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings , (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ) , value=0.0 , )

        hidden_states = hidden_states + positional_embeddings

        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
            attention_mask = F.pad(attention_mask , (0, self.additional_embeddings) , value=0.0)
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0)

        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states)

        for block in self.transformer_blocks:
            hidden_states = block(hidden_states , attention_mask=attention_mask)

        hidden_states = self.norm_out(hidden_states)

        if self.prd_embedding is not None:
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]

        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)

        if not return_dict:
            return (predicted_image_embedding,)

        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)
    def post_process_latents( self , prior_latents ):
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
| 456
|
def print_pascal_triangle(num_rows: int ) -> None:
    triangle = generate_pascal_triangle(num_rows )
    for row_idx in range(num_rows ):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1 ):
            print(end=""" """ )
        # Print row values
        for col_idx in range(row_idx + 1 ):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx] , end=""" """ )
            else:
                print(triangle[row_idx][col_idx] , end="""""" )
        print()
def generate_pascal_triangle(num_rows: int ) -> list[list[int]]:
    if not isinstance(num_rows , int ):
        raise TypeError("""The input value of 'num_rows' should be 'int'""" )

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            """The input value of 'num_rows' should be greater than or equal to 0""" )

    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows ):
        current_row = populate_current_row(triangle , current_row_idx )
        triangle.append(current_row )
    return triangle
def populate_current_row(triangle: list[list[int]] , current_row_idx: int ) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1 , current_row_idx ):
        calculate_current_element(
            triangle , current_row , current_row_idx , current_col_idx )
    return current_row
def calculate_current_element(triangle: list[list[int]] , current_row: list[int] , current_row_idx: int , current_col_idx: int , ) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
def generate_pascal_triangle_optimized(num_rows: int ) -> list[list[int]]:
    if not isinstance(num_rows , int ):
        raise TypeError("""The input value of 'num_rows' should be 'int'""" )

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            """The input value of 'num_rows' should be greater than or equal to 0""" )

    result: list[list[int]] = [[1]]

    for row_index in range(1 , num_rows ):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length , 2 ) )
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 )
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row )
    return result
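# Quick check (comment only): generate_pascal_triangle(4) and
# generate_pascal_triangle_optimized(4) both yield
# [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1]].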
def benchmark() -> None:
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable , value: int ) -> None:
        call = f"""{func.__name__}({value})"""
        timing = timeit(f"""__main__.{call}""" , setup="""import __main__""" )
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"""{call:38} -- {timing:.4f} seconds""" )

    for value in range(15 ):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func , value )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 456
| 1
|
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True
    def setUp( self ) -> List[Any]:
"""simple docstring"""
super().setUp()
    def get_tokenizer( self , **A_ ) -> Optional[int]:
"""simple docstring"""
return self.tokenizer_class.from_pretrained('junnyu/roformer_chinese_base' , **A_ )
    def get_rust_tokenizer( self , **A_ ) -> Any:
"""simple docstring"""
return self.rust_tokenizer_class.from_pretrained('junnyu/roformer_chinese_base' , **A_ )
    def get_chinese_input_output_texts( self ) -> Optional[int]:
        """simple docstring"""
        input_text = '永和服装饰品有限公司,今天天气非常好'
        output_text = '永和 服装 饰品 有限公司 , 今 天 天 气 非常 好'
        return input_text, output_text
    def test_tokenizer( self ) -> Optional[int]:
        """simple docstring"""
        tokenizer = self.get_tokenizer()
        input_text , output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text )
        self.assertListEqual(tokens , output_text.split() )
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , exp_tokens )
    def test_rust_tokenizer( self ) -> int:
        """simple docstring"""
        tokenizer = self.get_rust_tokenizer()
        input_text , output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text )
        self.assertListEqual(tokens , output_text.split() )
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , exp_tokens )
    def test_training_new_tokenizer( self ) -> Optional[Any]:
        """simple docstring"""
        # can't train new_tokenizer via Tokenizers lib
        pass

    def test_training_new_tokenizer_with_special_tokens_change( self ) -> int:
        """simple docstring"""
        # can't train new_tokenizer via Tokenizers lib
        pass

    def test_save_slow_from_fast_and_reload_fast( self ) -> Union[str, Any]:
        """simple docstring"""
        pass
| 70
|
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class BertTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = BertTokenizer
    rust_tokenizer_class = BertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    def setUp( self ) -> int:
        """simple docstring"""
        super().setUp()

        vocab_tokens = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
    def get_input_output_texts( self , tokenizer ) -> Union[str, Any]:
        """simple docstring"""
        input_text = 'UNwant\u00E9d,running'
        output_text = 'unwanted, running'
        return input_text, output_text
    def test_full_tokenizer( self ) -> Tuple:
        """simple docstring"""
        tokenizer = self.tokenizer_class(self.vocab_file )

        tokens = tokenizer.tokenize('UNwant\u00E9d,running' )
        self.assertListEqual(tokens , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [9, 6, 7, 12, 10, 11] )
    def test_rust_and_python_full_tokenizers( self ) -> List[str]:
        """simple docstring"""
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = 'UNwant\u00E9d,running'

        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )

        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True )
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True )

        sequence = 'UNwant\u00E9d,running'

        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )

        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
    def test_chinese( self ) -> Dict:
        """simple docstring"""
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
    def test_basic_tokenizer_lower( self ) -> int:
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=True )
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how  \n Are yoU?  ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
    def test_basic_tokenizer_lower_strip_accents_false( self ) -> int:
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=True , strip_accents=False )
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how  \n Are yoU?  ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
    def test_basic_tokenizer_lower_strip_accents_true( self ) -> Any:
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=True , strip_accents=True )
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how  \n Are yoU?  ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
    def test_basic_tokenizer_lower_strip_accents_default( self ) -> str:
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=True )
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how  \n Are yoU?  ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
    def test_basic_tokenizer_no_lower( self ) -> List[Any]:
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=False )
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how  \n Are yoU?  ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
    def test_basic_tokenizer_no_lower_strip_accents_false( self ) -> List[str]:
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=False , strip_accents=False )
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how  \n Are yoU?  ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
    def test_basic_tokenizer_no_lower_strip_accents_true( self ) -> Any:
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=False , strip_accents=True )
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how  \n Are yoU?  ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
    def test_basic_tokenizer_respects_never_split_tokens( self ) -> Any:
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=False , never_split=['[UNK]'] )
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how  \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
    def test_basic_tokenizer_splits_on_punctuation( self ) -> int:
        """simple docstring"""
        tokenizer = BasicTokenizer()
        text = 'a\n\'ll !!to?\'d of, can\'t.'
        expected = ['a', '\'', 'll', '!', '!', 'to', '?', '\'', 'd', 'of', ',', 'can', '\'', 't', '.']
        self.assertListEqual(tokenizer.tokenize(text ) , expected )
    def test_wordpiece_tokenizer( self ) -> Dict:
        """simple docstring"""
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']

        vocab = {}
        for i, token in enumerate(vocab_tokens ):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab , unk_token='[UNK]' )

        self.assertListEqual(tokenizer.tokenize('' ) , [] )
        self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
        self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
    def test_is_whitespace( self ) -> str:
"""simple docstring"""
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
    def test_is_control( self ) -> int:
"""simple docstring"""
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
    def test_is_punctuation( self ) -> Optional[Any]:
"""simple docstring"""
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
    def test_clean_text( self ) -> Optional[int]:
        """simple docstring"""
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
        self.assertListEqual(
            [rust_tokenizer.tokenize(t ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
@slow
    def test_sequence_builders( self ) -> int:
        """simple docstring"""
        tokenizer = self.tokenizer_class.from_pretrained('bert-base-uncased' )

        text = tokenizer.encode('sequence builders' , add_special_tokens=False )
        text_a = tokenizer.encode('multi-sequence build' , add_special_tokens=False )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_a + [102]
    def test_offsets_with_special_characters( self ) -> str:
        """simple docstring"""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )

                sentence = f"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
                tokens = tokenizer_r.encode_plus(
                    sentence , return_attention_mask=False , return_token_type_ids=False , return_offsets_mapping=True , add_special_tokens=True , )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r , 'do_lower_case' ) else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
    def test_change_tokenize_chinese_chars( self ) -> Dict:
        """simple docstring"""
        list_of_commun_chinese_char = ['的', '人', '有']
        text_with_chinese_char = ''.join(list_of_commun_chinese_char )
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                kwargs['tokenize_chinese_chars'] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char , add_special_tokens=False )
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char , add_special_tokens=False )

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r )
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p )

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p , list_of_commun_chinese_char )
                self.assertListEqual(tokens_without_spe_char_r , list_of_commun_chinese_char )

                kwargs['tokenize_chinese_chars'] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char , add_special_tokens=False )
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char , add_special_tokens=False )

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r )
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p )

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"""##{token}""" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char )
                ]
                self.assertListEqual(tokens_without_spe_char_p , expected_tokens )
                self.assertListEqual(tokens_without_spe_char_r , expected_tokens )
| 70
| 1
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : List[Any] = logging.get_logger(__name__)
def create_rename_keys(config , base_model=False ):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((F'blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((F'blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """vit.embeddings.cls_token"""),
("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ''
        else:
            prefix = 'vit.'
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'blocks.{i}.attn.qkv.weight' )
        in_proj_bias = state_dict.pop(F'blocks.{i}.attn.qkv.bias' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[: config.hidden_size]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size :]
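# --- Added illustration (hedged, toy-sized; `_demo_qkv_split` is not part of the
# original script). timm stores query/key/value as one fused (3*hidden, hidden)
# matrix; the HF layout expects three (hidden, hidden) matrices carved out row-wise:
def _demo_qkv_split(hidden=4):
    qkv = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
    q, k, v = qkv[:hidden, :], qkv[hidden : hidden * 2, :], qkv[-hidden:, :]
    assert torch.equal(torch.cat([q, k, v], dim=0), qkv)  # the three slices tile the fused matrix
    return q, k, v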
def remove_classification_head_(state_dict):
    ignore_keys = ['head.weight', 'head.bias']
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key(dct, old, new):
    val = dct.pop(old )
    dct[new] = val
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    # define default ViT configuration
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = 'huggingface/label-files'
        filename = 'imagenet-1k-id2label.json'
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    # load original model from torch hub
    original_model = torch.hub.load("""facebookresearch/dino:main""" , model_name )
    original_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict )
    rename_keys = create_rename_keys(config , base_model=base_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model )
    # load HuggingFace model
    if base_model:
        model = ViTModel(config , add_pooling_layer=False ).eval()
    else:
        model = ViTForImageClassification(config ).eval()
    model.load_state_dict(state_dict )
    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img() , return_tensors="""pt""" )
    pixel_values = encoding['pixel_values']
    outputs = model(pixel_values )
    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values )
        assert torch.allclose(final_hidden_state_cls_token , outputs.last_hidden_state[:, 0, :] , atol=1E-1 )
    else:
        logits = original_model(pixel_values )
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits , outputs.logits , atol=1E-3 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F'Saving image processor to {pytorch_dump_folder_path}' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""dino_vitb16""",
type=str,
help="""Name of the model trained with DINO you\'d like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--base_model""",
action="""store_true""",
help="""Whether to only convert the base model (no projection head weights).""",
)
parser.set_defaults(base_model=True)
    args = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 717
|
from collections import namedtuple
UpperCAmelCase_ : Union[str, Any] = namedtuple("""from_to""", """from_ to""")
UpperCAmelCase_ : int = {
"""cubicmeter""": from_to(1, 1),
"""litre""": from_to(0.0_01, 10_00),
"""kilolitre""": from_to(1, 1),
"""gallon""": from_to(0.0_04_54, 2_64.1_72),
"""cubicyard""": from_to(0.7_64_55, 1.3_07_95),
"""cubicfoot""": from_to(0.0_28, 35.31_47),
"""cup""": from_to(0.0_00_23_65_88, 42_26.75),
}
def volume_conversion(value: float, from_type: str, to_type: str) -> float:
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            F'Invalid \'from_type\' value: {from_type!r}. Supported values are:\n'
            + """, """.join(METRIC_CONVERSION ) )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            F'Invalid \'to_type\' value: {to_type!r}. Supported values are:\n'
            + """, """.join(METRIC_CONVERSION ) )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
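# --- Added usage sketch (hedged; the function name above was reconstructed from a
# mangled one). Conversions route through cubic meters: value * from_factor gives
# cubic meters, and * to_factor gives the target unit:
# volume_conversion(4, "cubicmeter", "litre")  ->  4 * 1 * 1000          == 4000.0
# volume_conversion(500, "litre", "gallon")    ->  500 * 0.001 * 264.172 == 132.086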
| 440
| 0
|
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", f"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", f"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm1.weight""", f"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.bias""", f"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.weight""", f"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", f"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
f"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
f"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm1.weight""", f"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.bias""", f"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.weight""", f"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.bias""", f"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.weight""", f"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""decoder.layers.{i}.final_layer_norm.bias"""))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.encoder.norm.weight''', '''encoder.layernorm.weight'''),
('''transformer.encoder.norm.bias''', '''encoder.layernorm.bias'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old )
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace('backbone.0.body' , 'backbone.conv_encoder.model' )
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict):
    prefix = ''
    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight' )
        in_proj_bias = state_dict.pop(F'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'encoder.layers.{i}.self_attn.q_proj.weight'] = in_proj_weight[:256, :]
        state_dict[F'encoder.layers.{i}.self_attn.q_proj.bias'] = in_proj_bias[:256]
        state_dict[F'encoder.layers.{i}.self_attn.k_proj.weight'] = in_proj_weight[256:512, :]
        state_dict[F'encoder.layers.{i}.self_attn.k_proj.bias'] = in_proj_bias[256:512]
        state_dict[F'encoder.layers.{i}.self_attn.v_proj.weight'] = in_proj_weight[-256:, :]
        state_dict[F'encoder.layers.{i}.self_attn.v_proj.bias'] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6 ):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(F'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight' )
        in_proj_bias = state_dict.pop(F'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'decoder.layers.{i}.self_attn.q_proj.weight'] = in_proj_weight[:256, :]
        state_dict[F'decoder.layers.{i}.self_attn.q_proj.bias'] = in_proj_bias[:256]
        state_dict[F'decoder.layers.{i}.self_attn.k_proj.weight'] = in_proj_weight[256:512, :]
        state_dict[F'decoder.layers.{i}.self_attn.k_proj.bias'] = in_proj_bias[256:512]
        state_dict[F'decoder.layers.{i}.self_attn.v_proj.weight'] = in_proj_weight[-256:, :]
        state_dict[F'decoder.layers.{i}.self_attn.v_proj.bias'] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            F'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight' )
        in_proj_bias_cross_attn = state_dict.pop(F'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias' )
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[F'decoder.layers.{i}.encoder_attn.q_proj.weight'] = in_proj_weight_cross_attn[:256, :]
        state_dict[F'decoder.layers.{i}.encoder_attn.q_proj.bias'] = in_proj_bias_cross_attn[:256]
        state_dict[F'decoder.layers.{i}.encoder_attn.k_proj.weight'] = in_proj_weight_cross_attn[256:512, :]
        state_dict[F'decoder.layers.{i}.encoder_attn.k_proj.bias'] = in_proj_bias_cross_attn[256:512]
        state_dict[F'decoder.layers.{i}.encoder_attn.v_proj.weight'] = in_proj_weight_cross_attn[-256:, :]
        state_dict[F'decoder.layers.{i}.encoder_attn.v_proj.bias'] = in_proj_bias_cross_attn[-256:]
def resize(image, checkpoint_url):
    width , height = image.size
    current_max_size = max(width , height )
    target_max_size = 800 if 'detection' in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
    return resized_image
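# --- Added illustration (hedged, toy numbers; `_demo_resize_math` is not part of
# the original script). The resize above preserves aspect ratio: with a 1200x900
# input and a detection target of 800, scale = 800 / 1200 = 2/3:
def _demo_resize_math(width=1200, height=900, target_max_size=800):
    scale = target_max_size / max(width, height)
    return int(round(scale * width)), int(round(scale * height))  # -> (800, 600)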
def normalize(image):
    image = F.to_tensor(image )
    image = F.normalize(image , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] )
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    logger.info('Converting model...' )
    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='cpu' )
    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    state_dict = rename_backbone_keys(state_dict )
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict )
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = 'model.'
    for key in state_dict.copy().keys():
        if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ):
            val = state_dict.pop(key )
            state_dict[prefix + key] = val
    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone='resnet18' , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )
    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: 'table', 1: 'table rotated'}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: 'table',
            1: 'table column',
            2: 'table row',
            3: 'table column header',
            4: 'table projected row header',
            5: 'table spanning cell',
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    image_processor = DetrImageProcessor(
        format='coco_detection' , max_size=800 if 'detection' in checkpoint_url else 1000 )
    model = TableTransformerForObjectDetection(config )
    model.load_state_dict(state_dict )
    model.eval()
    # verify our conversion
    filename = 'example_pdf.png' if 'detection' in checkpoint_url else 'example_table.png'
    file_path = hf_hub_download(repo_id='nielsr/example-pdf' , repo_type='dataset' , filename=filename )
    image = Image.open(file_path ).convert('RGB' )
    pixel_values = normalize(resize(image , checkpoint_url ) ).unsqueeze(0 )
    outputs = model(pixel_values )
    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]] )
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]] )
    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits , atol=1E-4 )
    assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes , atol=1E-4 )
    print('Looks ok!' )
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(F'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        model.save_pretrained(pytorch_dump_folder_path )
        image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        # Push model to HF hub
        logger.info('Pushing model to the hub...' )
        model_name = (
            'microsoft/table-transformer-detection'
            if 'detection' in checkpoint_url
            else 'microsoft/table-transformer-structure-recognition'
        )
        model.push_to_hub(model_name )
        image_processor.push_to_hub(model_name )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
type=str,
choices=[
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth''',
],
help='''URL of the Table Transformer checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 335
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
UpperCAmelCase = logging.get_logger(__name__)
class YolosFeatureExtractor(YolosImageProcessor):
    '''simple docstring'''
    def __init__( self , *args , **kwargs ) -> None:
        warnings.warn(
            """The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use YolosImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs )
| 433
| 0
|
'''simple docstring'''
from math import pow, sqrt
def validate(*values):
    result = len(values ) > 0 and all(value > 0.0 for value in values )
    return result


def effusion_ratio(molar_mass_1, molar_mass_2):
    return (
        round(sqrt(molar_mass_2 / molar_mass_1 ) , 6 )
        if validate(molar_mass_1 , molar_mass_2 )
        else ValueError("Input Error: Molar mass values must be greater than 0." )
    )


def first_effusion_rate(effusion_rate, molar_mass_1, molar_mass_2):
    return (
        round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1 ) , 6 )
        if validate(effusion_rate , molar_mass_1 , molar_mass_2 )
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0." )
    )


def second_effusion_rate(effusion_rate, molar_mass_1, molar_mass_2):
    return (
        round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1 ) , 6 )
        if validate(effusion_rate , molar_mass_1 , molar_mass_2 )
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0." )
    )


def first_molar_mass(molar_mass, effusion_rate_1, effusion_rate_2):
    return (
        round(molar_mass / pow(effusion_rate_1 / effusion_rate_2 , 2 ) , 6 )
        if validate(molar_mass , effusion_rate_1 , effusion_rate_2 )
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0." )
    )


def second_molar_mass(molar_mass, effusion_rate_1, effusion_rate_2):
    return (
        round(pow(effusion_rate_1 / effusion_rate_2 , 2 ) / molar_mass , 6 )
        if validate(molar_mass , effusion_rate_1 , effusion_rate_2 )
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0." )
    )
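# --- Added usage sketch (hedged; the function names and the direction of the
# sqrt(M2/M1) ratio above were reconstructed from mangled code). By Graham's law,
# effusion rate scales with 1/sqrt(molar mass), so hydrogen (M ≈ 2.016) effuses
# about 4x faster than oxygen (M ≈ 31.999):
# effusion_ratio(2.016, 31.999)  ->  sqrt(31.999 / 2.016) ≈ 3.984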
| 701
|
'''simple docstring'''
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class TimmBackbone(PreTrainedModel, BackboneMixin):
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig
    def __init__(self, config, **kwargs):
        '''simple docstring'''
        requires_backends(self , "timm" )
        super().__init__(config )
        self.config = config
        if config.backbone is None:
            raise ValueError("backbone is not set in the config. Please set it to a timm model name." )
        if config.backbone not in timm.list_models():
            raise ValueError(F"""backbone {config.backbone} is not supported by timm.""" )
        if hasattr(config , "out_features" ) and config.out_features is not None:
            raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead." )
        pretrained = getattr(config , "use_pretrained_backbone" , None )
        if pretrained is None:
            raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False." )
        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config , "out_indices" , None ) is not None else (-1,)
        self._backbone = timm.create_model(
            config.backbone , pretrained=pretrained , features_only=config.features_only , in_chans=config.num_channels , out_indices=out_indices , **kwargs , )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["module"]: str(i ) for i, layer in enumerate(self._backbone.feature_info.info )}
        super()._init_backbone(config )
@classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        '''simple docstring'''
        requires_backends(cls , ["vision", "timm"] )
        from ...models.timm_backbone import TimmBackboneConfig
        config = kwargs.pop("config" , TimmBackboneConfig() )
        use_timm = kwargs.pop("use_timm_backbone" , True )
        if not use_timm:
            raise ValueError("use_timm_backbone must be True for timm backbones" )
        num_channels = kwargs.pop("num_channels" , config.num_channels )
        features_only = kwargs.pop("features_only" , config.features_only )
        use_pretrained_backbone = kwargs.pop("use_pretrained_backbone" , config.use_pretrained_backbone )
        out_indices = kwargs.pop("out_indices" , config.out_indices )
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path , num_channels=num_channels , features_only=features_only , use_pretrained_backbone=use_pretrained_backbone , out_indices=out_indices , )
        return super()._from_config(config , **kwargs )
    def _init_weights(self, module):
        '''simple docstring'''
        pass
    def forward(self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs) -> Union[BackboneOutput, Tuple[Tensor, ...]]:
        '''simple docstring'''
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        if output_attentions:
            raise ValueError("Cannot output attentions for timm backbones at the moment" )
        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values , **kwargs )
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices )
        else:
            feature_maps = self._backbone(pixel_values , **kwargs )
            hidden_states = None
        feature_maps = tuple(feature_maps )
        hidden_states = tuple(hidden_states ) if hidden_states is not None else None
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output
        return BackboneOutput(feature_maps=feature_maps , hidden_states=hidden_states , attentions=None )
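# --- Added usage sketch (hedged; not part of the original file, and `TimmBackbone`
# is the reconstructed class name used above) ---
# config = TimmBackboneConfig(backbone="resnet18", use_pretrained_backbone=False, features_only=True)
# backbone = TimmBackbone(config)
# outputs = backbone(torch.randn(1, 3, 224, 224))  # requires `import torch`
# feature_maps = outputs.feature_maps              # one map per requested out_index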
| 40
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_speech_encoder_decoder'] = ['SpeechEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_speech_encoder_decoder'] = ['FlaxSpeechEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 638
|
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    '''simple docstring'''
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ('''weight''',)
        flax_tensor = torch.permute(flax_tensor , (0, 2, 1) )
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple ):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ('''weight''',)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ('''weight''',)
    return flax_key_tuple, flax_tensor
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    '''simple docstring'''
    if "metadata" in layer:
        split_layer = layer.split('''metadata''' )
        curr_real_layer_name = ''''''.join(split_layer[0] )[:-1]
        split_layer = [tuple(('''metadata''' + split_layer[1]).split('''/''' ) )]
    elif "kvstore" in layer:
        split_layer = layer.split('''kvstore''' )
        curr_real_layer_name = ''''''.join(split_layer[0] )[:-1]
        split_layer = [tuple(('''kvstore''' + split_layer[1]).split('''/''' ) )]
    else:
        split_layer = layer.split('''/''' )
        curr_real_layer_name = '''/'''.join(split_layer[:-1] )
        split_layer = (split_layer[-1],)
    if "kvstore/path" in layer:
        content = f'{switch_checkpoint_path}/{checkpoint_info[layer]}'
    elif "kvstore/driver" in layer:
        content = '''file'''
    else:
        content = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block, save_path):
    '''simple docstring'''
    current_block = rename_keys(current_block )
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace('''/''' , '''.''' )] = v
    current_block = new_current_block
    torch.save(current_block , save_path )
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    '''simple docstring'''
    max_shard_size = convert_file_size_to_int(max_shard_size )
    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0
    os.makedirs(dump_path , exist_ok=True )
    with gfile.GFile(switch_checkpoint_path + '''/checkpoint''' , '''rb''' ) as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read() )['''optimizer''']['''target''']
        checkpoint_info = flatten_dict(checkpoint_info , sep='''/''' )
    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name , split_layer , content = get_key_and_tensorstore_dict(
            layer , checkpoint_info , switch_checkpoint_path )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}
    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
        raw_weights = torch.tensor(raw_weights )
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
        # use the renaming pattern from the small conversion scripts
        key , raw_weights = rename_base_flax_keys(tuple(key.split('''/''' ) ) , raw_weights )
        key = '''/'''.join(key )
        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path , weights_name.replace('''.bin''' , f'-{len(sharded_state_dicts )+1:05d}-of-???.bin' ) )
            rename_and_save_block(current_block , save_path )
            sharded_state_dicts.append(current_block.keys() )
            del current_block
            current_block = {}
            current_block_size = 0
        current_block[key] = raw_weights.to(getattr(torch , dtype ) )
        current_block_size += weight_size
        total_size += weight_size
    # Add the last block
    save_path = os.path.join(dump_path , weights_name.replace('''.bin''' , f'-{len(sharded_state_dicts )+1:05d}-of-???.bin' ) )
    rename_and_save_block(current_block , save_path )
    sharded_state_dicts.append(current_block.keys() )
    # If we only have one shard, we return it
    if len(sharded_state_dicts ) == 1:
        return {weights_name: sharded_state_dicts[0]}, None
    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts ):
        shard_file = weights_name.replace(
            '''.bin''' , f'-{idx+1:05d}-of-{len(sharded_state_dicts ):05d}.bin' )
        temp_filename = os.path.join(dump_path , weights_name.replace('''.bin''' , f'-{idx+1:05d}-of-???.bin' ) )
        os.rename(temp_filename , os.path.join(dump_path , shard_file ) )
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {'''total_size''': total_size}
    index = {'''metadata''': metadata, '''weight_map''': weight_map}
    with open(os.path.join(dump_path , WEIGHTS_INDEX_NAME ) , '''w''' , encoding='''utf-8''' ) as f:
        content = json.dumps(index , indent=2 , sort_keys=True ) + '''\n'''
        f.write(content )
    return metadata, index
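# --- Added illustration (hedged; `_demo_weight_size` is not part of the original
# script). Shard boundaries above are decided purely by byte size,
# numel * dtype_byte_size(dtype), e.g. a 1_000_000-element float32 tensor
# contributes 4_000_000 bytes toward max_shard_size:
def _demo_weight_size(numel=1_000_000, dtype=torch.float32):
    return numel * dtype_byte_size(dtype)  # -> 4_000_000 for fp32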
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
    args = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def sanity_check():
    '''simple docstring'''
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer
    config = SwitchTransformersConfig.from_pretrained('''google/switch-base-8''' )
    config.save_pretrained('''/home/arthur_huggingface_co/transformers/switch_converted''' )
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        '''/home/arthur_huggingface_co/transformers/switch_converted''' , device_map='''auto''' )
    tokenizer = T5Tokenizer.from_pretrained('''t5-small''' )
    text = '''A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.'''
    input_ids = tokenizer(text , return_tensors='''pt''' ).input_ids
    out = model.generate(input_ids , decoder_start_token_id=0 )
    print(tokenizer.decode(out[0] ) )
| 188
| 0
|
'''simple docstring'''
from __future__ import annotations
def pigeon_sort(array: list[int]) -> list[int]:
    if len(array ) == 0:
        return array
    _min, _max = min(array ), max(array )
    # Compute the variables
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range
    # Make the sorting.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1
    # Makes the array back by replacing the numbers.
    index = 0
    for i in range(holes_range ):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1
    # Returns the sorted array.
    return array
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase : Optional[int] = input('Enter numbers separated by comma:\n')
lowercase : str = [int(x) for x in user_input.split(',')]
print(pigeon_sort(unsorted))
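# --- Added note (hedged): pigeonhole sort runs in O(n + range) time with O(range)
# extra space, so it only pays off when max(array) - min(array) is small relative
# to len(array). Quick check:
# pigeon_sort([8, 3, 2, 7, 4, 6, 8]) -> [2, 3, 4, 6, 7, 8, 8]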
| 710
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
lowercase : List[str] = {
'transfo-xl-wt103': 'https://huggingface.co/transfo-xl-wt103/resolve/main/config.json',
}
class TransfoXLConfig(PretrainedConfig):
    """simple docstring"""

    model_type = 'transfo-xl'
    keys_to_ignore_at_inference = ['mems']
    attribute_map = {
        'n_token': 'vocab_size',
        'hidden_size': 'd_model',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }
    def __init__( self , vocab_size=267735 , cutoffs=[20000, 40000, 200000] , d_model=1024 , d_embed=1024 , n_head=16 , d_head=64 , d_inner=4096 , div_val=4 , pre_lnorm=False , n_layer=18 , mem_len=1600 , clamp_len=1000 , same_length=True , proj_share_all_but_first=True , attn_type=0 , sample_softmax=-1 , adaptive=True , dropout=0.1 , dropatt=0.0 , untie_r=True , init="normal" , init_range=0.01 , proj_init_std=0.01 , init_std=0.02 , layer_norm_epsilon=1E-5 , eos_token_id=0 , **kwargs , ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs )
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs )
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs )
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id , **kwargs )
    @property
    def max_position_embeddings(self) -> int:
        """simple docstring"""
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit." )
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value: int):
        """simple docstring"""
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit." )
| 159
| 0
|
'''simple docstring'''
def multiplication_table(number: int, number_of_terms: int) -> str:
    return "\n".join(
        F'''{number} * {i} = {number * i}''' for i in range(1, number_of_terms + 1 ) )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
| 131
|
'''simple docstring'''
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
__snake_case : Dict = open # noqa: we just need to have a builtin inside this module to test it properly
| 131
| 1
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class TFGPT2Tokenizer(metaclass=DummyObject ):
    _backends = ['keras_nlp']

    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['keras_nlp'] )
| 706
|
"""simple docstring"""
def solution(n: int = 4000000 ) -> int:
    '''simple docstring'''
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1] )
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib ) - 1 ):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
if __name__ == "__main__":
print(f"""{solution() = }""")
| 659
| 0
|
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
# TODO Update this
_snake_case : Tuple = {
"facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
# See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "esm"
    def __init__( self , vocab_size=None , mask_token_id=None , pad_token_id=None , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1026 , initializer_range=0.02 , layer_norm_eps=1E-12 , position_embedding_type="absolute" , use_cache=True , emb_layer_norm_before=None , token_dropout=False , is_folding_model=False , esmfold_config=None , vocab_list=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , mask_token_id=mask_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values." )
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config , dict ):
                esmfold_config = EsmFoldConfig(**esmfold_config )
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!" )
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config , "use_esm_attn_map" , False ):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!" )

    def to_dict(self):
        output = super().to_dict()
        if isinstance(self.esmfold_config , EsmFoldConfig ):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
"""simple docstring"""
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0
    embed_aa: bool = True
    bypass_lm: bool = False
    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None
    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk , dict ):
            self.trunk = TrunkConfig(**self.trunk )

    def to_dict(self):
        output = asdict(self )
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
"""simple docstring"""
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None
    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module , dict ):
            self.structure_module = StructureModuleConfig(**self.structure_module )
        if self.max_recycles <= 0:
            raise ValueError(F'`max_recycles` should be positive, got {self.max_recycles}.' )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                F' {self.sequence_state_dim} and {self.sequence_head_width}.' )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                F' {self.pairwise_state_dim} and {self.pairwise_head_width}.' )
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got"
                F' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.' )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got"
                F' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.' )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(F'`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.' )
        if self.dropout >= 0.4:
            raise ValueError(F'`dropout` should not be greater than 0.4, got {self.dropout}.' )

    def to_dict(self):
        output = asdict(self )
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
"""simple docstring"""
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5
    def to_dict(self):
        return asdict(self )
def get_default_vocab_list():
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 81
|
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a 50 times smaller than this see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
__a : Optional[Any] = """facebook/wmt19-en-de"""
__a : Union[str, Any] = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
__a : Optional[int] = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
tiny_model = FSMTForConditionalGeneration(config)
print(F'num of params {tiny_model.num_parameters()}')
# Test
batch = tokenizer(["""Making tiny model"""], return_tensors="""pt""")
outputs = tiny_model(**batch)
print("""test output:""", len(outputs.logits[0]))
# Save
__a : Union[str, Any] = """tiny-wmt19-en-de"""
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F'Generated {mname_tiny}')
# Upload
# transformers-cli upload tiny-wmt19-en-de
| 606
| 0
|
"""simple docstring"""
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    """simple docstring"""
    if a == 0:
        raise ValueError('Coefficient \'a\' must not be zero.' )
    delta = b * b - 4 * a * c

    root_1 = (-b + sqrt(delta )) / (2 * a)
    root_2 = (-b - sqrt(delta )) / (2 * a)

    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main():
    """simple docstring"""
    solution_1 , solution_2 = quadratic_roots(a=5 , b=6 , c=1 )
    print(F'''The solutions are: {solution_1} and {solution_2}''' )


if __name__ == "__main__":
    main()
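# --- Added worked check (hedged): for 5x^2 + 6x + 1 = 0, the discriminant is
# 6*6 - 4*5*1 = 16, so the roots are (-6 ± 4) / 10, i.e. -0.2 and -1.0:
# quadratic_roots(a=5, b=6, c=1) -> (-0.2, -1.0)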
| 536
|
"""simple docstring"""
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("s3fs") is not None
if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401
COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F'''A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.''')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path: str) -> str:
    """simple docstring"""
    if "://" in dataset_path:
        dataset_path = dataset_path.split('://' )[1]
    return dataset_path
def is_remote_filesystem(fs) -> bool:
    """simple docstring"""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False
def rename(fs, src: str, dst: str):
    """simple docstring"""
    is_local = not is_remote_filesystem(fs )
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src ) , fs._strip_protocol(dst ) )
    else:
        fs.mv(src , dst , recursive=True )
def _reset_fsspec_lock() -> None:
    """simple docstring"""
    if hasattr(fsspec.asyn , 'reset_lock' ):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
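# --- Added usage sketch (hedged) for the helpers above ---
# extract_path_from_uri("s3://my-bucket/datasets/train") -> "my-bucket/datasets/train"
# extract_path_from_uri("/local/path")                   -> "/local/path"
# is_remote_filesystem(fsspec.filesystem("file"))        -> False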
| 536
| 1
|
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends
if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup
__magic_name__ : Any = logging.get_logger(__name__)
class MarkupLMFeatureExtractor(FeatureExtractionMixin ):
def __init__( self : Tuple , **__lowerCamelCase : Optional[int] ):
"""simple docstring"""
requires_backends(self , ['''bs4'''] )
super().__init__(**__lowerCamelCase )
    def xpath_soup( self , element ):
        """simple docstring"""
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name , recursive=False )
            xpath_tags.append(child.name )
            xpath_subscripts.append(
                0 if 1 == len(siblings ) else next(i for i, s in enumerate(siblings , 1 ) if s is child ) )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts
    def get_three_from_single( self , html_string ):
        """simple docstring"""
        html_code = BeautifulSoup(html_string , '''html.parser''' )
        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []
        for element in html_code.descendants:
            if type(element ) == bs4.element.NavigableString:
                if type(element.parent ) != bs4.element.Tag:
                    continue
                text_in_this_tag = html.unescape(element ).strip()
                if not text_in_this_tag:
                    continue
                all_doc_strings.append(text_in_this_tag )
                xpath_tags , xpath_subscripts = self.xpath_soup(element )
                string2xtag_seq.append(xpath_tags )
                string2xsubs_seq.append(xpath_subscripts )
        if len(all_doc_strings ) != len(string2xtag_seq ):
            raise ValueError('''Number of doc strings and xtags does not correspond''' )
        if len(all_doc_strings ) != len(string2xsubs_seq ):
            raise ValueError('''Number of doc strings and xsubs does not correspond''' )
        return all_doc_strings, string2xtag_seq, string2xsubs_seq
    def construct_xpath( self , xpath_tags , xpath_subscripts ):
        """simple docstring"""
        xpath = ''''''
        for tagname, subs in zip(xpath_tags , xpath_subscripts ):
            xpath += F"""/{tagname}"""
            if subs != 0:
                xpath += F"""[{subs}]"""
        return xpath
    def __call__( self , html_strings ) -> BatchFeature:
        """simple docstring"""
        valid_strings = False
        # Check that strings has a valid type
        if isinstance(html_strings , str ):
            valid_strings = True
        elif isinstance(html_strings , (list, tuple) ):
            if len(html_strings ) == 0 or isinstance(html_strings[0] , str ):
                valid_strings = True
        if not valid_strings:
            raise ValueError(
                '''HTML strings must of type `str`, `List[str]` (batch of examples), '''
                F"""but is of type {type(html_strings )}.""" )
        is_batched = bool(isinstance(html_strings , (list, tuple) ) and (isinstance(html_strings[0] , str )) )
        if not is_batched:
            html_strings = [html_strings]
        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings , string2xtag_seq , string2xsubs_seq = self.get_three_from_single(html_string )
            nodes.append(all_doc_strings )
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings , string2xtag_seq , string2xsubs_seq ):
                xpath_string = self.construct_xpath(tag_list , sub_list )
                xpath_strings.append(xpath_string )
            xpaths.append(xpath_strings )
        # return as Dict
        data = {'''nodes''': nodes, '''xpaths''': xpaths}
        encoded_inputs = BatchFeature(data=data , tensor_type=None )
        return encoded_inputs
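# --- Added usage sketch (hedged; requires bs4 installed, and uses the class name
# reconstructed above) ---
if __name__ == "__main__":
    demo_extractor = MarkupLMFeatureExtractor()
    demo_encoding = demo_extractor("<html><body><p>Hello</p><p>World</p></body></html>")
    print(demo_encoding["nodes"])   # [['Hello', 'World']]
    print(demo_encoding["xpaths"])  # [['/html/body/p[1]', '/html/body/p[2]']]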
| 615
|
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse("""0.8.3"""):
raise Exception("""requires gluonnlp == 0.8.3""")
if version.parse(mx.__version__) != version.parse("""1.5.0"""):
raise Exception("""requires mxnet == 1.5.0""")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = """The Nymphenburg Palace is a beautiful palace in Munich!"""
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str, pytorch_dump_folder_path: str):
    bort_4_8_768_1024_hparams = {
'''attention_cell''': '''multi_head''',
'''num_layers''': 4,
'''units''': 10_24,
'''hidden_size''': 7_68,
'''max_length''': 5_12,
'''num_heads''': 8,
'''scaled''': True,
'''dropout''': 0.1,
'''use_residual''': True,
'''embed_size''': 10_24,
'''embed_dropout''': 0.1,
'''word_embed''': None,
'''layer_norm_eps''': 1E-5,
'''token_type_vocab_size''': 2,
}
    predefined_args = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args['''attention_cell'''] , num_layers=predefined_args['''num_layers'''] , units=predefined_args['''units'''] , hidden_size=predefined_args['''hidden_size'''] , max_length=predefined_args['''max_length'''] , num_heads=predefined_args['''num_heads'''] , scaled=predefined_args['''scaled'''] , dropout=predefined_args['''dropout'''] , output_attention=False , output_all_encodings=False , use_residual=predefined_args['''use_residual'''] , activation=predefined_args.get('''activation''' , '''gelu''' ) , layer_norm_eps=predefined_args.get('''layer_norm_eps''' , None ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = '''openwebtext_ccnews_stories_books_cased'''
    # Specify download folder to Gluonnlp's vocab
    data_dir = os.path.join(get_home_dir() , '''models''' )
    vocab = _load_vocab(vocab_name , None , data_dir , cls=Vocab )
    original_bort = nlp.model.BERTModel(
        encoder , len(vocab ) , units=predefined_args['''units'''] , embed_size=predefined_args['''embed_size'''] , embed_dropout=predefined_args['''embed_dropout'''] , word_embed=predefined_args['''word_embed'''] , use_pooler=False , use_token_type_embed=False , token_type_vocab_size=predefined_args['''token_type_vocab_size'''] , use_classifier=False , use_decoder=False , )
    original_bort.load_parameters(bort_checkpoint_path , cast_dtype=True , ignore_extra=True )
    params = original_bort._collect_params_with_prefix()
# Build our config 🤗
    hf_bort_config_json = {
'''architectures''': ['''BertForMaskedLM'''],
'''attention_probs_dropout_prob''': predefined_args['''dropout'''],
'''hidden_act''': '''gelu''',
'''hidden_dropout_prob''': predefined_args['''dropout'''],
'''hidden_size''': predefined_args['''embed_size'''],
'''initializer_range''': 0.02,
'''intermediate_size''': predefined_args['''hidden_size'''],
'''layer_norm_eps''': predefined_args['''layer_norm_eps'''],
'''max_position_embeddings''': predefined_args['''max_length'''],
'''model_type''': '''bort''',
'''num_attention_heads''': predefined_args['''num_heads'''],
'''num_hidden_layers''': predefined_args['''num_layers'''],
'''pad_token_id''': 1, # 2 = BERT, 1 = RoBERTa
'''type_vocab_size''': 1, # 2 = BERT, 1 = RoBERTa
        '''vocab_size''': len(vocab ),
}
    hf_bort_config = BertConfig.from_dict(hf_bort_config_json )
    hf_bort_model = BertForMaskedLM(hf_bort_config )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array ) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
    def check_and_map_params(hf_param , gluon_param ):
        shape_hf = hf_param.shape
        gluon_param = to_torch(params[gluon_param] )
        shape_gluon = gluon_param.shape
assert (
shape_hf == shape_gluon
), F"""The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"""
return gluon_param
    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight , '''word_embed.0.weight''' )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight , '''encoder.position_weight''' )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias , '''encoder.layer_norm.beta''' )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight , '''encoder.layer_norm.gamma''' )
    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
        layer = hf_bort_model.bert.encoder.layer[i]
# self attention
        self_attn = layer.attention.self
        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_key.bias""" )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_key.weight""" )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_query.bias""" )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_query.weight""" )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_value.bias""" )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_value.weight""" )
# self attention output
        self_output = layer.attention.output
        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias , F"""encoder.transformer_cells.{i}.proj.bias""" )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight , F"""encoder.transformer_cells.{i}.proj.weight""" )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias , F"""encoder.transformer_cells.{i}.layer_norm.beta""" )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight , F"""encoder.transformer_cells.{i}.layer_norm.gamma""" )
# intermediate
        intermediate = layer.intermediate
        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias , F"""encoder.transformer_cells.{i}.ffn.ffn_1.bias""" )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight , F"""encoder.transformer_cells.{i}.ffn.ffn_1.weight""" )
# output
        bert_output = layer.output
        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias , F"""encoder.transformer_cells.{i}.ffn.ffn_2.bias""" )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight , F"""encoder.transformer_cells.{i}.ffn.ffn_2.weight""" )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias , F"""encoder.transformer_cells.{i}.ffn.layer_norm.beta""" )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight , F"""encoder.transformer_cells.{i}.ffn.layer_norm.gamma""" )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained('''roberta-base''' )
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT )['''input_ids''']
    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids] )
    output_gluon = original_bort(inputs=gluon_input_ids , token_types=[] )
    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path )
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path )
    hf_bort_model.eval()
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT , return_tensors='''pt''' )
    output_hf = hf_bort_model(**input_ids )[0]
    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()
    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer ) ).item()
    success = np.allclose(gluon_layer , hf_layer , atol=1E-3 )
    if success:
        print('''✔️ Both models output the same tensors''' )
    else:
        print('''❌ Both models do **NOT** output the same tensors''' )
        print('''Absolute difference is:''' , max_absolute_diff )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--bort_checkpoint_path""", default=None, type=str, required=True, help="""Path the official Bort params file."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
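# Standalone sketch of the shape-checked weight-copy pattern `check_and_map_params`
# implements above: wrap the source array in a torch Parameter only after verifying
# that it matches the shape of the parameter it replaces. Names here are
# illustrative, not taken from the conversion script.
import numpy as np
import torch
from torch import nn

def copy_checked(dst_param: nn.Parameter, src_array: np.ndarray) -> nn.Parameter:
    new_param = nn.Parameter(torch.FloatTensor(src_array))
    assert dst_param.shape == new_param.shape, (
        f"source has shape {tuple(new_param.shape)}, expected {tuple(dst_param.shape)}"
    )
    return new_param

dst = nn.Parameter(torch.zeros(4, 8))
dst = copy_checked(dst, np.ones((4, 8), dtype=np.float32))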
| 615
| 1
|
'''simple docstring'''
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester(ConfigTester ):
def lowerCamelCase_ ( self: Optional[int] ):
lowerCamelCase__ : Optional[Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(UpperCamelCase__ , """tf_padding""" ) )
self.parent.assertTrue(hasattr(UpperCamelCase__ , """depth_multiplier""" ) )
class MobileNetVaModelTester:
def __init__( self: Tuple , UpperCamelCase__: str , UpperCamelCase__: List[str]=13 , UpperCamelCase__: Optional[Any]=3 , UpperCamelCase__: str=32 , UpperCamelCase__: int=0.25 , UpperCamelCase__: Dict=8 , UpperCamelCase__: List[Any]=True , UpperCamelCase__: Optional[int]=1_024 , UpperCamelCase__: Tuple=32 , UpperCamelCase__: Union[str, Any]="relu6" , UpperCamelCase__: List[str]=0.1 , UpperCamelCase__: str=0.02 , UpperCamelCase__: Optional[int]=True , UpperCamelCase__: int=True , UpperCamelCase__: Dict=10 , UpperCamelCase__: Dict=None , ):
lowerCamelCase__ : int = parent
lowerCamelCase__ : int = batch_size
lowerCamelCase__ : Optional[Any] = num_channels
lowerCamelCase__ : Union[str, Any] = image_size
lowerCamelCase__ : Optional[Any] = depth_multiplier
lowerCamelCase__ : Tuple = min_depth
lowerCamelCase__ : int = tf_padding
lowerCamelCase__ : Union[str, Any] = int(last_hidden_size * depth_multiplier )
lowerCamelCase__ : Any = output_stride
lowerCamelCase__ : Optional[int] = hidden_act
lowerCamelCase__ : str = classifier_dropout_prob
lowerCamelCase__ : List[str] = use_labels
lowerCamelCase__ : List[str] = is_training
lowerCamelCase__ : List[Any] = num_labels
lowerCamelCase__ : Dict = initializer_range
lowerCamelCase__ : Any = scope
    def prepare_config_and_inputs(self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def get_config(self ):
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
    def create_and_check_model(self , config , pixel_values , labels , pixel_labels ):
        model = MobileNetVaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
    def create_and_check_for_image_classification(self , config , pixel_values , labels , pixel_labels ):
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common(self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetVaModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"""feature-extraction""": MobileNetVaModel, """image-classification""": MobileNetVaForImageClassification}
        if is_torch_available()
        else {}
    )
a = False
a = False
a = False
a = False
    def setUp(self ):
        self.model_tester = MobileNetVaModelTester(self )
        self.config_tester = MobileNetVaConfigTester(self , config_class=MobileNetVaConfig , has_text_modality=False )
def lowerCamelCase_ ( self: List[str] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileNetV1 does not use inputs_embeds""" )
def lowerCamelCase_ ( self: List[str] ):
pass
@unittest.skip(reason="""MobileNetV1 does not support input and output embeddings""" )
def lowerCamelCase_ ( self: Union[str, Any] ):
pass
@unittest.skip(reason="""MobileNetV1 does not output attentions""" )
def lowerCamelCase_ ( self: Tuple ):
pass
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Dict = model_class(UpperCamelCase__ )
lowerCamelCase__ : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ : List[str] = [*signature.parameters.keys()]
lowerCamelCase__ : Optional[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def lowerCamelCase_ ( self: Optional[Any] ):
lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def lowerCamelCase_ ( self: str ):
def check_hidden_states_output(UpperCamelCase__: str , UpperCamelCase__: Optional[int] , UpperCamelCase__: Optional[Any] ):
lowerCamelCase__ : Dict = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
lowerCamelCase__ : List[Any] = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
lowerCamelCase__ : Any = outputs.hidden_states
lowerCamelCase__ : Dict = 26
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
lowerCamelCase__ , lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Any = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase__ : List[str] = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
@slow
def lowerCamelCase_ ( self: Union[str, Any] ):
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : Optional[Any] = MobileNetVaModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def prepare_img():
lowerCamelCase__ : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class MobileNetVaModelIntegrationTest(unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self: Optional[Any] ):
return (
MobileNetVaImageProcessor.from_pretrained("""google/mobilenet_v1_1.0_224""" ) if is_vision_available() else None
)
@slow
def lowerCamelCase_ ( self: str ):
lowerCamelCase__ : List[str] = MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v1_1.0_224""" ).to(UpperCamelCase__ )
lowerCamelCase__ : Union[str, Any] = self.default_image_processor
lowerCamelCase__ : List[Any] = prepare_img()
lowerCamelCase__ : Optional[Any] = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
lowerCamelCase__ : Optional[int] = model(**UpperCamelCase__ )
# verify the logits
lowerCamelCase__ : Tuple = torch.Size((1, 1_001) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
lowerCamelCase__ : Tuple = torch.tensor([-4.1_739, -1.1_233, 3.1_205] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) )
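# Quick sanity sketch of the shape arithmetic the tests above assert: the channel
# count scales with the depth multiplier and the spatial size shrinks by the
# output stride. Values mirror the tester defaults above; this is illustrative,
# not part of the test suite.
batch_size, image_size, output_stride = 13, 32, 8
last_hidden_size = int(1_024 * 0.25)  # base width * depth_multiplier
expected_shape = (batch_size, last_hidden_size, image_size // output_stride, image_size // output_stride)
print(expected_shape)  # (13, 256, 4, 4)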
| 631
|
'''simple docstring'''
import sys
import turtle
def get_mid(pa , pb ) -> tuple[float, float]:
    return (pa[0] + pb[0]) / 2, (pa[1] + pb[1]) / 2
def triangle(vertexa , vertexb , vertexc , depth , ) -> None:
    my_pen.up()
    my_pen.goto(vertexa[0] , vertexa[1] )
    my_pen.down()
    my_pen.goto(vertexb[0] , vertexb[1] )
    my_pen.goto(vertexc[0] , vertexc[1] )
    my_pen.goto(vertexa[0] , vertexa[1] )
    if depth == 0:
        return
    triangle(vertexa , get_mid(vertexa , vertexb ) , get_mid(vertexa , vertexc ) , depth - 1 )
    triangle(vertexb , get_mid(vertexa , vertexb ) , get_mid(vertexb , vertexc ) , depth - 1 )
    triangle(vertexc , get_mid(vertexc , vertexa ) , get_mid(vertexc , vertexb ) , depth - 1 )
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
'''Correct format for using this script: '''
'''python fractals.py <int:depth_for_fractal>'''
)
    my_pen = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor('''red''')
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
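# The recursion above draws one triangle per call and fans out three-fold until
# depth hits 0, so a run at depth d draws (3 ** (d + 1) - 1) // 2 triangles in
# total. A small turtle-free check of that count:
def count_triangles(depth):
    return 1 if depth == 0 else 1 + 3 * count_triangles(depth - 1)

assert count_triangles(3) == (3 ** 4 - 1) // 2 == 40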
| 631
| 1
|
"""simple docstring"""
import os
from pathlib import Path
def load_cuda_kernels():
"""simple docstring"""
from torch.utils.cpp_extension import load
    root = Path(__file__ ).resolve().parent.parent.parent / 'kernels' / 'deformable_detr'
    src_files = [
root / filename
for filename in [
'vision.cpp',
os.path.join('cpu' , 'ms_deform_attn_cpu.cpp' ),
os.path.join('cuda' , 'ms_deform_attn_cuda.cu' ),
]
]
load(
        'MultiScaleDeformableAttention' , src_files , with_cuda=True , extra_include_paths=[str(root )] , extra_cflags=['-DWITH_CUDA=1'] , extra_cuda_cflags=[
'-DCUDA_HAS_FP16=1',
'-D__CUDA_NO_HALF_OPERATORS__',
'-D__CUDA_NO_HALF_CONVERSIONS__',
'-D__CUDA_NO_HALF2_OPERATORS__',
] , )
import MultiScaleDeformableAttention as MSDA
return MSDA
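# Minimal sketch of the same torch JIT-extension mechanism, using an inline C++
# source to show what `load` above compiles and imports. This assumes a working
# C++ toolchain; the extension name and function are illustrative.
import torch
from torch.utils.cpp_extension import load_inline

cpp_source = "torch::Tensor twice(torch::Tensor x) { return x + x; }"
module = load_inline(name="twice_ext", cpp_sources=cpp_source, functions="twice")
print(module.twice(torch.ones(2)))  # tensor([2., 2.])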
| 93
|
'''simple docstring'''
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class TimmBackbone(PreTrainedModel , BackboneMixin):
"""simple docstring"""
    main_input_name = """pixel_values"""
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig
    def __init__(self , config , **kwargs ) -> None:
        requires_backends(self , '''timm''' )
        super().__init__(config )
        self.config = config
        if config.backbone is None:
            raise ValueError('''backbone is not set in the config. Please set it to a timm model name.''' )
        if config.backbone not in timm.list_models():
            raise ValueError(F'''backbone {config.backbone} is not supported by timm.''' )
        if hasattr(config , '''out_features''' ) and config.out_features is not None:
            raise ValueError('''out_features is not supported by TimmBackbone. Please use out_indices instead.''' )
        pretrained = getattr(config , '''use_pretrained_backbone''' , None )
        if pretrained is None:
            raise ValueError('''use_pretrained_backbone is not set in the config. Please set it to True or False.''' )
        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config , '''out_indices''' , None ) is not None else (-1,)
        self._backbone = timm.create_model(
            config.backbone , pretrained=pretrained , features_only=config.features_only , in_chans=config.num_channels , out_indices=out_indices , **kwargs , )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer['''module''']: str(i ) for i, layer in enumerate(self._backbone.feature_info.info )}
        super()._init_backbone(config )
@classmethod
    def from_pretrained(cls , pretrained_model_name_or_path , *model_args , **kwargs ):
        requires_backends(cls , ['''vision''', '''timm'''] )
        from ...models.timm_backbone import TimmBackboneConfig
        config = kwargs.pop('''config''' , TimmBackboneConfig() )
        use_timm = kwargs.pop('''use_timm_backbone''' , True )
        if not use_timm:
            raise ValueError('''use_timm_backbone must be True for timm backbones''' )
        num_channels = kwargs.pop('''num_channels''' , config.num_channels )
        features_only = kwargs.pop('''features_only''' , config.features_only )
        use_pretrained_backbone = kwargs.pop('''use_pretrained_backbone''' , config.use_pretrained_backbone )
        out_indices = kwargs.pop('''out_indices''' , config.out_indices )
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path , num_channels=num_channels , features_only=features_only , use_pretrained_backbone=use_pretrained_backbone , out_indices=out_indices , )
        return super()._from_config(config , **kwargs )
    def _init_weights(self , module ):
        pass
    def forward(self , pixel_values , output_attentions=None , output_hidden_states=None , return_dict=None , **kwargs ) -> Union[BackboneOutput, Tuple[Tensor, ...]]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        if output_attentions:
            raise ValueError('''Cannot output attentions for timm backbones at the moment''' )
        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values , **kwargs )
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices )
        else:
            feature_maps = self._backbone(pixel_values , **kwargs )
            hidden_states = None
        feature_maps = tuple(feature_maps )
        hidden_states = tuple(hidden_states ) if hidden_states is not None else None
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output
        return BackboneOutput(feature_maps=feature_maps , hidden_states=hidden_states , attentions=None )
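# Hedged sketch of the timm feature-extraction API the backbone above wraps
# (assumes timm is installed; "resnet18" and the stage index are illustrative).
import timm
import torch

backbone = timm.create_model("resnet18", pretrained=False, features_only=True, out_indices=(4,))
feature_maps = backbone(torch.randn(1, 3, 224, 224))
print([tuple(f.shape) for f in feature_maps])  # [(1, 512, 7, 7)]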
| 404
| 0
|
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
UpperCAmelCase_ = AutoConfig.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
UpperCAmelCase_ = TFAutoModel.from_pretrained(__lowercase , from_pt=__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
UpperCAmelCase_ = AutoModel.from_pretrained(__lowercase , from_tf=__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
@slow
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
UpperCAmelCase_ = AutoConfig.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
UpperCAmelCase_ = TFAutoModelForPreTraining.from_pretrained(__lowercase , from_pt=__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
UpperCAmelCase_ = AutoModelForPreTraining.from_pretrained(__lowercase , from_tf=__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
@slow
def SCREAMING_SNAKE_CASE ( self : Any ):
'''simple docstring'''
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = AutoConfig.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
UpperCAmelCase_ = TFAutoModelForCausalLM.from_pretrained(__lowercase , from_pt=__lowercase )
UpperCAmelCase_ , UpperCAmelCase_ = TFAutoModelForCausalLM.from_pretrained(
__lowercase , output_loading_info=__lowercase , from_pt=__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
UpperCAmelCase_ = AutoModelForCausalLM.from_pretrained(__lowercase , from_tf=__lowercase )
UpperCAmelCase_ , UpperCAmelCase_ = AutoModelForCausalLM.from_pretrained(
__lowercase , output_loading_info=__lowercase , from_tf=__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
@slow
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = AutoConfig.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
UpperCAmelCase_ = TFAutoModelWithLMHead.from_pretrained(__lowercase , from_pt=__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
UpperCAmelCase_ = AutoModelWithLMHead.from_pretrained(__lowercase , from_tf=__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
@slow
def SCREAMING_SNAKE_CASE ( self : str ):
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = AutoConfig.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
UpperCAmelCase_ = TFAutoModelForMaskedLM.from_pretrained(__lowercase , from_pt=__lowercase )
UpperCAmelCase_ , UpperCAmelCase_ = TFAutoModelForMaskedLM.from_pretrained(
__lowercase , output_loading_info=__lowercase , from_pt=__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
UpperCAmelCase_ = AutoModelForMaskedLM.from_pretrained(__lowercase , from_tf=__lowercase )
UpperCAmelCase_ , UpperCAmelCase_ = AutoModelForMaskedLM.from_pretrained(
__lowercase , output_loading_info=__lowercase , from_tf=__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
@slow
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
'''simple docstring'''
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = AutoConfig.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
UpperCAmelCase_ = TFAutoModelForSeqaSeqLM.from_pretrained(__lowercase , from_pt=__lowercase )
UpperCAmelCase_ , UpperCAmelCase_ = TFAutoModelForSeqaSeqLM.from_pretrained(
__lowercase , output_loading_info=__lowercase , from_pt=__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
UpperCAmelCase_ = AutoModelForSeqaSeqLM.from_pretrained(__lowercase , from_tf=__lowercase )
UpperCAmelCase_ , UpperCAmelCase_ = AutoModelForSeqaSeqLM.from_pretrained(
__lowercase , output_loading_info=__lowercase , from_tf=__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
@slow
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
UpperCAmelCase_ = AutoConfig.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
UpperCAmelCase_ = TFAutoModelForSequenceClassification.from_pretrained(__lowercase , from_pt=__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
UpperCAmelCase_ = AutoModelForSequenceClassification.from_pretrained(__lowercase , from_tf=__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
@slow
def SCREAMING_SNAKE_CASE ( self : str ):
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
UpperCAmelCase_ = AutoConfig.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
UpperCAmelCase_ = TFAutoModelForQuestionAnswering.from_pretrained(__lowercase , from_pt=__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
UpperCAmelCase_ = AutoModelForQuestionAnswering.from_pretrained(__lowercase , from_tf=__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase_ = TFAutoModelWithLMHead.from_pretrained(__lowercase , from_pt=__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=__lowercase ) , 1_44_10 )
UpperCAmelCase_ = AutoModelWithLMHead.from_pretrained(__lowercase , from_tf=__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=__lowercase ) , 1_44_10 )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ = TFAutoModelWithLMHead.from_pretrained(__lowercase , from_pt=__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=__lowercase ) , 1_44_10 )
UpperCAmelCase_ = AutoModelWithLMHead.from_pretrained(__lowercase , from_tf=__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=__lowercase ) , 1_44_10 )
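# The round trip the tests above exercise, in miniature: save weights in one
# framework and reload them in the other via from_pt / from_tf. Sketch only --
# the checkpoint name is illustrative and the first call downloads weights.
from transformers import AutoModel, TFAutoModel

pt_model = AutoModel.from_pretrained("bert-base-uncased")
pt_model.save_pretrained("./tmp-bert")
tf_model = TFAutoModel.from_pretrained("./tmp-bert", from_pt=True)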
| 486
|
from math import factorial
UpperCamelCase__ : dict[str, int] = {str(digit): factorial(digit) for digit in range(10)}
def digit_factorial_sum(number ):
    if not isinstance(number , int ):
        raise TypeError("""Parameter number must be int""" )
    if number < 0:
        raise ValueError("""Parameter number must be greater than or equal to 0""" )
    # Converts the number to a string to iterate over its digits and sum their factorials.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number ) )
def solution(chain_length = 60 , number_limit = 1000000 ):
    if not isinstance(chain_length , int ) or not isinstance(number_limit , int ):
        raise TypeError("""Parameters chain_length and number_limit must be int""" )
    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            """Parameters chain_length and number_limit must be greater than 0""" )
    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths = {}
    for start_chain_element in range(1 , number_limit ):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0
        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element )
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element )
        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]
        chain_sets_lengths[start_chain_element] = chain_set_length
        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"{solution()}")
| 486
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_a : List[str] = {
'configuration_blip': [
'BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlipConfig',
'BlipTextConfig',
'BlipVisionConfig',
],
'processing_blip': ['BlipProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Optional[int] = ['BlipImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : int = [
'BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlipModel',
'BlipPreTrainedModel',
'BlipForConditionalGeneration',
'BlipForQuestionAnswering',
'BlipVisionModel',
'BlipTextModel',
'BlipForImageTextRetrieval',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : List[str] = [
'TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBlipModel',
'TFBlipPreTrainedModel',
'TFBlipForConditionalGeneration',
'TFBlipForQuestionAnswering',
'TFBlipVisionModel',
'TFBlipTextModel',
'TFBlipForImageTextRetrieval',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
_a : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
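# Minimal sketch of the lazy-import pattern `_LazyModule` implements above: a
# module object that defers importing a submodule until one of its attributes is
# first accessed. The class and mapping here are illustrative, not the real API.
import importlib
import types

class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for module_name, symbols in self._import_structure.items():
            if attr in symbols:
                return getattr(importlib.import_module(module_name), attr)
        raise AttributeError(attr)

lazy = TinyLazyModule("demo", {"json": ["dumps", "loads"]})
print(lazy.dumps({"ok": True}))  # json is imported only on this first access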
| 213
|
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False
def _A ( self : Union[str, Any] ):
super().setUp()
SCREAMING_SNAKE_CASE : List[Any] = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
SCREAMING_SNAKE_CASE : Optional[Any] = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
SCREAMING_SNAKE_CASE : int = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}
SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
SCREAMING_SNAKE_CASE : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(UpperCAmelCase_ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(UpperCAmelCase_ ) )
def _A ( self : List[Any] , **UpperCAmelCase_ : str ):
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def _A ( self : Optional[int] , UpperCAmelCase_ : Dict ):
SCREAMING_SNAKE_CASE : Tuple = "adapt act apte"
SCREAMING_SNAKE_CASE : int = "adapt act apte"
return input_text, output_text
def _A ( self : str ):
SCREAMING_SNAKE_CASE : int = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
SCREAMING_SNAKE_CASE : Tuple = "adapt act apte"
SCREAMING_SNAKE_CASE : List[str] = ["adapt", "act", "ap@@", "te"]
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
SCREAMING_SNAKE_CASE : Tuple = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , UpperCAmelCase_ )
def _A ( self : Dict ):
SCREAMING_SNAKE_CASE : Union[str, Any] = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
assert tok("sam" ).input_ids == [1384]
SCREAMING_SNAKE_CASE : str = "I am a small frog."
SCREAMING_SNAKE_CASE : List[Any] = tok([src_text] , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ )["input_ids"]
SCREAMING_SNAKE_CASE : int = tok.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def _A ( self : Tuple ):
SCREAMING_SNAKE_CASE : List[str] = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
SCREAMING_SNAKE_CASE : Tuple = "I am a small frog ."
SCREAMING_SNAKE_CASE : Optional[int] = "."
SCREAMING_SNAKE_CASE : Dict = tok(UpperCAmelCase_ )["input_ids"]
SCREAMING_SNAKE_CASE : Optional[Any] = tok(UpperCAmelCase_ )["input_ids"]
assert encoded[-1] == encoded_dot[0]
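# Sketch of the `@@` continuation convention the fixture above encodes: a token
# ending in `@@` glues onto the next one, so ["ap@@", "te"] decodes to "apte".
# The helper is illustrative, not the tokenizer's own decode logic.
def detokenize(tokens):
    return "".join(t[:-2] if t.endswith("@@") else t + " " for t in tokens).strip()

assert detokenize(["adapt", "act", "ap@@", "te"]) == "adapt act apte"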
| 62
| 0
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()
def floats_list(shape , scale=1.0 , rng=None , name=None ):
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase ):
'''simple docstring'''
    def __init__(self , parent , batch_size=7 , min_seq_length=400 , max_seq_length=2000 , feature_size=10 , hop_length=160 , chunk_length=8 , padding_value=0.0 , sampling_rate=4000 , return_attention_mask=False , do_normalize=True , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length
    def prepare_feat_extract_dict(self ):
"""simple docstring"""
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def lowerCamelCase ( self : Dict , lowerCamelCase : int=False , lowerCamelCase : List[Any]=False ) -> int:
"""simple docstring"""
def _flatten(lowerCamelCase : Any ):
return list(itertools.chain(*lowerCamelCase ) )
if equal_length:
_UpperCAmelCase = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_UpperCAmelCase = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_UpperCAmelCase = [np.asarray(lowerCamelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin , unittest.TestCase ):
'''simple docstring'''
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None
    def setUp(self ):
        """simple docstring"""
        self.feat_extract_tester = WhisperFeatureExtractionTester(self )
def lowerCamelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = feat_extract_first.save_pretrained(lowerCamelCase )[0]
check_json_file_has_correct_format(lowerCamelCase )
_UpperCAmelCase = self.feature_extraction_class.from_pretrained(lowerCamelCase )
_UpperCAmelCase = feat_extract_first.to_dict()
_UpperCAmelCase = feat_extract_second.to_dict()
_UpperCAmelCase = feat_extract_first.mel_filters
_UpperCAmelCase = feat_extract_second.mel_filters
self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase ) )
self.assertEqual(lowerCamelCase , lowerCamelCase )
def lowerCamelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = os.path.join(lowerCamelCase , """feat_extract.json""" )
feat_extract_first.to_json_file(lowerCamelCase )
_UpperCAmelCase = self.feature_extraction_class.from_json_file(lowerCamelCase )
_UpperCAmelCase = feat_extract_first.to_dict()
_UpperCAmelCase = feat_extract_second.to_dict()
_UpperCAmelCase = feat_extract_first.mel_filters
_UpperCAmelCase = feat_extract_second.mel_filters
self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase ) )
self.assertEqual(lowerCamelCase , lowerCamelCase )
def lowerCamelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
        # Tests that all calls wrap to encode_plus and batch_encode_plus
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_UpperCAmelCase = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_UpperCAmelCase = [np.asarray(lowerCamelCase ) for speech_input in speech_inputs]
# Test feature size
_UpperCAmelCase = feature_extractor(lowerCamelCase , padding="""max_length""" , return_tensors="""np""" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
_UpperCAmelCase = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_features
_UpperCAmelCase = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_features
self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) )
# Test batched
_UpperCAmelCase = feature_extractor(lowerCamelCase , return_tensors="""np""" ).input_features
_UpperCAmelCase = feature_extractor(lowerCamelCase , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase , lowerCamelCase ):
self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
_UpperCAmelCase = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_UpperCAmelCase = np.asarray(lowerCamelCase )
_UpperCAmelCase = feature_extractor(lowerCamelCase , return_tensors="""np""" ).input_features
_UpperCAmelCase = feature_extractor(lowerCamelCase , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase , lowerCamelCase ):
self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) )
# Test truncation required
_UpperCAmelCase = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
_UpperCAmelCase = [np.asarray(lowerCamelCase ) for speech_input in speech_inputs]
_UpperCAmelCase = [x[: feature_extractor.n_samples] for x in speech_inputs]
_UpperCAmelCase = [np.asarray(lowerCamelCase ) for speech_input in speech_inputs_truncated]
_UpperCAmelCase = feature_extractor(lowerCamelCase , return_tensors="""np""" ).input_features
_UpperCAmelCase = feature_extractor(lowerCamelCase , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase , lowerCamelCase ):
self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) )
def lowerCamelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
import torch
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        np_speech_inputs = np.random.rand(100 , 32 ).astype(np.float32 )
        py_speech_inputs = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""np""" )
            self.assertTrue(np_processed.input_features.dtype == np.float32 )
            pt_processed = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""pt""" )
            self.assertTrue(pt_processed.input_features.dtype == torch.float32 )
    def _load_datasamples(self , num_samples ):
        """simple docstring"""
        ds = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
        # automatic decoding with librispeech
        speech_samples = ds.sort("""id""" ).select(range(num_samples ) )[:num_samples]["""audio"""]
        return [x["array"] for x in speech_samples]
def lowerCamelCase ( self : str ) -> int:
"""simple docstring"""
# fmt: off
_UpperCAmelCase = torch.tensor(
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
] )
# fmt: on
_UpperCAmelCase = self._load_datasamples(1 )
_UpperCAmelCase = WhisperFeatureExtractor()
_UpperCAmelCase = feature_extractor(lowerCamelCase , return_tensors="""pt""" ).input_features
self.assertEqual(input_features.shape , (1, 80, 3000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , lowerCamelCase , atol=1E-4 ) )
def lowerCamelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        audio = self._load_datasamples(1 )[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65_535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=None )[0]
        self.assertTrue(np.all(np.mean(audio ) < 1E-3 ) )
        self.assertTrue(np.all(np.abs(np.var(audio ) - 1 ) < 1E-3 ) )
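# The normalization the last test checks, as a standalone sketch: shifting to
# zero mean and scaling to unit variance makes the features invariant to the
# amplitude rescaling applied above. Names are illustrative.
import numpy as np

def zero_mean_unit_var(x):
    return (x - x.mean()) / np.sqrt(x.var() + 1e-7)

audio = np.random.rand(16_000).astype(np.float32) * 65_535
normed = zero_mean_unit_var(audio)
print(abs(normed.mean()) < 1e-3, abs(normed.var() - 1) < 1e-3)  # True True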
| 402
|
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class TaFilmDecoder(ModelMixin , ConfigMixin ):
'''simple docstring'''
@register_to_config
def __init__( self : Any , lowerCamelCase : int = 128 , lowerCamelCase : int = 256 , lowerCamelCase : float = 2000.0 , lowerCamelCase : int = 768 , lowerCamelCase : int = 12 , lowerCamelCase : int = 12 , lowerCamelCase : int = 64 , lowerCamelCase : int = 2048 , lowerCamelCase : float = 0.1 , ) -> str:
"""simple docstring"""
super().__init__()
_UpperCAmelCase = nn.Sequential(
nn.Linear(lowerCamelCase , d_model * 4 , bias=lowerCamelCase ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=lowerCamelCase ) , nn.SiLU() , )
_UpperCAmelCase = nn.Embedding(lowerCamelCase , lowerCamelCase )
_UpperCAmelCase = False
_UpperCAmelCase = nn.Linear(lowerCamelCase , lowerCamelCase , bias=lowerCamelCase )
_UpperCAmelCase = nn.Dropout(p=lowerCamelCase )
_UpperCAmelCase = nn.ModuleList()
for lyr_num in range(lowerCamelCase ):
# FiLM conditional T5 decoder
_UpperCAmelCase = DecoderLayer(d_model=lowerCamelCase , d_kv=lowerCamelCase , num_heads=lowerCamelCase , d_ff=lowerCamelCase , dropout_rate=lowerCamelCase )
self.decoders.append(lowerCamelCase )
_UpperCAmelCase = TaLayerNorm(lowerCamelCase )
_UpperCAmelCase = nn.Dropout(p=lowerCamelCase )
_UpperCAmelCase = nn.Linear(lowerCamelCase , lowerCamelCase , bias=lowerCamelCase )
    def encoder_decoder_mask(self , query_input , key_input ):
        """simple docstring"""
        mask = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
        return mask.unsqueeze(-3 )
    def forward(self , encodings_and_masks , decoder_input_tokens , decoder_noise_time ):
        """simple docstring"""
        batch , _ , _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)
        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
        conditioning_emb = self.conditioning_emb(time_steps ).unsqueeze(1 )
        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
        seq_length = decoder_input_tokens.shape[1]
        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length , device=decoder_input_tokens.device ) , (batch, seq_length) , )
        position_encodings = self.position_encoding(decoder_positions )
        inputs = self.continuous_inputs_projection(decoder_input_tokens )
        inputs += position_encodings
        y = self.dropout(inputs )
        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask , y )) for x, y in encodings_and_masks]
        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
        for lyr in self.decoders:
            y = lyr(
                y , conditioning_emb=conditioning_emb , encoder_hidden_states=encoded , encoder_attention_mask=encoder_decoder_mask , )[0]
        y = self.decoder_norm(y )
        y = self.post_dropout(y )
        spec_out = self.spec_out(y )
        return spec_out
class DecoderLayer(nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] , lowerCamelCase : Tuple , lowerCamelCase : Optional[Any] , lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[int] , lowerCamelCase : Optional[Any] , lowerCamelCase : Any=1E-6 ) -> int:
"""simple docstring"""
super().__init__()
_UpperCAmelCase = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=lowerCamelCase , d_kv=lowerCamelCase , num_heads=lowerCamelCase , dropout_rate=lowerCamelCase ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=lowerCamelCase , d_kv=lowerCamelCase , num_heads=lowerCamelCase , dropout_rate=lowerCamelCase , layer_norm_epsilon=lowerCamelCase , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=lowerCamelCase , d_ff=lowerCamelCase , dropout_rate=lowerCamelCase , layer_norm_epsilon=lowerCamelCase ) )
    def forward(self , hidden_states , conditioning_emb=None , attention_mask=None , encoder_hidden_states=None , encoder_attention_mask=None , encoder_decoder_position_bias=None , ):
        """simple docstring"""
        hidden_states = self.layer[0](
            hidden_states , conditioning_emb=conditioning_emb , attention_mask=attention_mask , )
        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to(
                encoder_hidden_states.dtype )
            hidden_states = self.layer[1](
                hidden_states , key_value_states=encoder_hidden_states , attention_mask=encoder_extended_attention_mask , )
        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states , conditioning_emb )
        return (hidden_states,)
class TaLayerSelfAttentionCond(nn.Module ):
'''simple docstring'''
def __init__( self : List[Any] , lowerCamelCase : Optional[int] , lowerCamelCase : Optional[int] , lowerCamelCase : List[str] , lowerCamelCase : int ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
_UpperCAmelCase = TaLayerNorm(lowerCamelCase )
_UpperCAmelCase = TaFiLMLayer(in_features=d_model * 4 , out_features=lowerCamelCase )
_UpperCAmelCase = Attention(query_dim=lowerCamelCase , heads=lowerCamelCase , dim_head=lowerCamelCase , out_bias=lowerCamelCase , scale_qk=lowerCamelCase )
_UpperCAmelCase = nn.Dropout(lowerCamelCase )
def lowerCamelCase ( self : int , lowerCamelCase : Union[str, Any] , lowerCamelCase : List[Any]=None , lowerCamelCase : Dict=None , ) -> List[Any]:
"""simple docstring"""
# pre_self_attention_layer_norm
_UpperCAmelCase = self.layer_norm(lowerCamelCase )
if conditioning_emb is not None:
_UpperCAmelCase = self.FiLMLayer(lowerCamelCase , lowerCamelCase )
# Self-attention block
_UpperCAmelCase = self.attention(lowerCamelCase )
_UpperCAmelCase = hidden_states + self.dropout(lowerCamelCase )
return hidden_states
class TaLayerCrossAttention(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states,
            encoder_hidden_states=key_value_states,
            attention_mask=attention_mask.squeeze(1),
        )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output
class TaLayerFFCond(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = TaDenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)

        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states
class TaDenseGatedActDense(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        # gated GELU: one projection goes through the activation, the other
        # stays linear, and the two are multiplied elementwise
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states
class TaLayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
        # Square Layer Normalization https://arxiv.org/abs/1910.07467, thus the variance is calculated
        # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
        # half-precision inputs is done in fp32.
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)

        # convert back into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)

        return self.weight * hidden_states
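# Quick numeric sketch (illustrative, not part of the original file): RMS
# normalization of x = [3, 4] divides by sqrt(mean(x**2)) = sqrt(12.5), with
# the learned per-channel weight initialized to ones.
# >>> norm = TaLayerNorm(2)
# >>> x = torch.tensor([[3.0, 4.0]])
# >>> torch.allclose(norm(x), x * torch.rsqrt(torch.tensor(12.5) + 1e-6))
# True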
class NewGELUActivation(nn.Module):
    # tanh approximation of the Gaussian Error Linear Unit (GELU)
    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))
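# Sanity sketch (illustrative): PyTorch ships the same tanh approximation, so
# the module above should match F.gelu with approximate="tanh".
# >>> act = NewGELUActivation()
# >>> x = torch.linspace(-3.0, 3.0, 7)
# >>> torch.allclose(act(x), torch.nn.functional.gelu(x, approximate="tanh"))
# True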
class TaFiLMLayer(nn.Module):
    """FiLM layer: feature-wise linear modulation of `x` by a conditioning embedding."""

    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
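# Shape sketch (illustrative, hypothetical sizes): the conditioning embedding
# is projected to a (scale, shift) pair and applied as x * (1 + scale) + shift,
# broadcasting over the sequence dimension.
# >>> film = TaFiLMLayer(in_features=32, out_features=8)
# >>> x, cond = torch.randn(2, 10, 8), torch.randn(2, 1, 32)
# >>> film(x, cond).shape
# torch.Size([2, 10, 8])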
| 402
| 1
|
def check_bouncy(n: int) -> bool:
    """Return True if ``n`` is bouncy, i.e. its digits are neither entirely
    non-decreasing nor entirely non-increasing."""
    if not isinstance(n, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    str_n = str(n)
    sorted_str_n = "".join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
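# Illustrative checks (not from the original file): 101 is bouncy because its
# digits are neither sorted ascending nor descending; 134468 (non-decreasing)
# and 66420 (non-increasing) are not.
# >>> check_bouncy(101)
# True
# >>> check_bouncy(134468)
# False
# >>> check_bouncy(66420)
# False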
def solution(percent: float = 99) -> int:
    """Return the least number for which the proportion of bouncy numbers
    first reaches ``percent``."""
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100")
    bouncy_num = 0
    num = 1

    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(99)}")
| 478
|
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = "\\n\n"
_DESCRIPTION = "\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n"
_KWARGS_DESCRIPTION = "\nArgs:\n    model_id (str): model used for calculating Perplexity\n        NOTE: Perplexity can only be calculated for causal language models.\n        This includes models such as gpt2, causal variations of bert,\n        causal versions of t5, and more (the full list can be found\n        in the AutoModelForCausalLM documentation here:\n        https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n    input_texts (list of str): input text, each separate text snippet\n        is one list entry.\n    batch_size (int): the batch size to run texts through the model. Defaults to 16.\n    add_start_token (bool): whether to add the start token to the texts,\n        so the perplexity can include the probability of the first word. Defaults to True.\n    device (str): device to run on, defaults to 'cuda' when available\nReturns:\n    perplexity: dictionary containing the perplexity scores for the texts\n        in the input list, as well as the mean perplexity. If one of the input texts is\n        longer than the max input length of the model, then it is truncated to the\n        max length for the perplexity computation.\nExamples:\n    Example 1:\n        >>> perplexity = datasets.load_metric(\"perplexity\")\n        >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]\n        >>> results = perplexity.compute(model_id='gpt2',\n        ...                              add_start_token=False,\n        ...                              input_texts=input_texts) # doctest:+ELLIPSIS\n        >>> print(list(results.keys()))\n        ['perplexities', 'mean_perplexity']\n        >>> print(round(results[\"mean_perplexity\"], 2))\n        78.22\n        >>> print(round(results[\"perplexities\"][0], 2))\n        11.11\n\n    Example 2:\n        >>> perplexity = datasets.load_metric(\"perplexity\")\n        >>> input_texts = datasets.load_dataset(\"wikitext\",\n        ...                                     \"wikitext-2-raw-v1\",\n        ...                                     split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS\n        [...]\n        >>> input_texts = [s for s in input_texts if s!='']\n        >>> results = perplexity.compute(model_id='gpt2',\n        ...                              input_texts=input_texts) # doctest:+ELLIPSIS\n        >>> print(list(results.keys()))\n        ['perplexities', 'mean_perplexity']\n        >>> print(round(results[\"mean_perplexity\"], 2))\n        60.35\n        >>> print(round(results[\"perplexities\"][0], 2))\n        81.12\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Perplexity(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                }
            ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )
    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu, cpu or cuda."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            perplexity_batch = torch.exp2(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
| 478
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_squeezebert''': [
'''SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SqueezeBertConfig''',
'''SqueezeBertOnnxConfig''',
],
'''tokenization_squeezebert''': ['''SqueezeBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_squeezebert"] = [
'''SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SqueezeBertForMaskedLM''',
'''SqueezeBertForMultipleChoice''',
'''SqueezeBertForQuestionAnswering''',
'''SqueezeBertForSequenceClassification''',
'''SqueezeBertForTokenClassification''',
'''SqueezeBertModel''',
'''SqueezeBertModule''',
'''SqueezeBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 290
|
def fizz_buzz(number: int, iterations: int) -> str:
    """Play FizzBuzz from ``number`` for ``iterations`` rounds, returning the
    space-separated output string."""
    if not isinstance(iterations, int):
        raise ValueError("iterations must be defined as integers")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError("starting number must be an integer and be more than 0")
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")

    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)

        # print(out)
        number += 1
        out += " "
    return out
if __name__ == "__main__":
import doctest
doctest.testmod()
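# Example (illustrative): starting at 1 for 15 iterations reproduces the
# classic sequence; note the trailing space appended after every round.
# >>> fizz_buzz(1, 15)
# '1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz '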
| 290
| 1
|
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        range_bbox=1_000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    def get_config(self):
        return LiltConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]],
            device=torch_device,
        )

        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
| 491
|
from __future__ import annotations
class Matrix:
    def __init__(self, rows):
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float."
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []
    def columns(self):
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self):
        return len(self.rows)

    @property
    def num_columns(self):
        return len(self.rows[0])

    @property
    def order(self):
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self):
        return self.order[0] == self.order[1]

    def identity(self):
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)
    def determinant(self):
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0])
            )
        else:
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns)
            )

    def is_invertable(self):
        return bool(self.determinant())
    def get_minor(self, row, column):
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row, column):
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self):
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )
    def cofactors(self):
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ]
        )

    def adjugate(self):
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self):
        determinant = self.determinant()
        if not determinant:
            raise TypeError("Only matrices with a non-zero determinant have an inverse")
        return self.adjugate() * (1 / determinant)

    def __repr__(self):
        return str(self.rows)
    def __str__(self):
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(value) for value in self.rows[0]) + ".]]"
        return (
            "["
            + "\n ".join(
                [
                    "[" + ". ".join([str(value) for value in row]) + ".]"
                    for row in self.rows
                ]
            )
            + "]"
        )
    def add_row(self, row, position=None):
        type_error = TypeError("Row must be a list containing all ints and/or floats")
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                "Row must be equal in length to the other rows in the matrix")
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column, position=None):
        type_error = TypeError(
            "Column must be a list containing all ints and/or floats")
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                "Column must be equal in length to the other columns in the matrix")
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]
    def __eq__(self, other):
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other):
        return not self == other

    def __neg__(self):
        return self * -1
    def __add__(self, other):
        if self.order != other.order:
            raise ValueError("Addition requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __sub__(self, other):
        if self.order != other.order:
            raise ValueError("Subtraction requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )
    def __mul__(self, other):
        if isinstance(other, (int, float)):
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows])
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    "The number of columns in the first matrix must "
                    "be equal to the number of rows in the second")
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ]
            )
        else:
            raise TypeError(
                "A Matrix can only be multiplied by an int, float, or another matrix")

    def __pow__(self, other):
        if not isinstance(other, int):
            raise TypeError("A Matrix can only be raised to the power of an int")
        if not self.is_square:
            raise ValueError("Only square matrices can be raised to a power")
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                "Only invertable matrices can be raised to a negative power")
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row, column):
        return sum(row[i] * column[i] for i in range(len(row)))
if __name__ == "__main__":
import doctest
doctest.testmod()
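# Illustrative usage (assuming the method names restored above):
# >>> m = Matrix([[1, 2], [3, 4]])
# >>> m.determinant()
# -2
# >>> print(m + m)
# [[2. 4.]
#  [6. 8.]]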
| 491
| 1
|
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
a_ = logging.get_logger(__name__) # pylint: disable=invalid-name
a_ = 2_56
class SpectrogramDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["melgan"]

    def __init__(self, notes_encoder, continuous_encoder, decoder, scheduler, melgan):
        super().__init__()

        # From MELGAN
        self.min_value = math.log(1e-5)  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128

        self.register_modules(
            notes_encoder=notes_encoder,
            continuous_encoder=continuous_encoder,
            decoder=decoder,
            scheduler=scheduler,
            melgan=melgan,
        )
    def scale_features(self, features, output_range=(-1.0, 1.0), clip=False):
        """Linearly scale features to the network output range."""
        min_out, max_out = output_range
        if clip:
            features = torch.clip(features, self.min_value, self.max_value)
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out
    def scale_to_features(self, outputs, input_range=(-1.0, 1.0), clip=False):
        """Invert by linearly scaling network outputs back to the features range."""
        min_out, max_out = input_range
        outputs = torch.clip(outputs, min_out, max_out) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value
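    # Round-trip sketch (illustrative): scale_features maps log-mel values from
    # [self.min_value, self.max_value] into output_range, and scale_to_features
    # inverts that mapping, so
    #   scaled = pipe.scale_features(mel, output_range=[-1.0, 1.0], clip=True)
    #   restored = pipe.scale_to_features(scaled, input_range=[-1.0, 1.0])
    # recovers `mel` up to the initial clipping.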
    def encode(self, input_tokens, continuous_inputs, continuous_mask):
        tokens_mask = input_tokens > 0
        tokens_encoded, tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens, encoder_inputs_mask=tokens_mask
        )
        continuous_encoded, continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs, encoder_inputs_mask=continuous_mask
        )
        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
    def decode(self, encodings_and_masks, input_tokens, noise_time):
        timesteps = noise_time
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=input_tokens.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(input_tokens.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0], dtype=timesteps.dtype, device=timesteps.device)

        logits = self.decoder(
            encodings_and_masks=encodings_and_masks, decoder_input_tokens=input_tokens, decoder_noise_time=timesteps
        )
        return logits
    @torch.no_grad()
    def __call__(
        self,
        input_tokens,
        generator=None,
        num_inference_steps=100,
        return_dict=True,
        output_type="numpy",
        callback=None,
        callback_steps=1,
    ):
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims], dtype=np.float32)
        full_pred_mel = np.zeros([1, 0, self.n_dims], np.float32)
        ones = torch.ones((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)

        for i, encoder_input_tokens in enumerate(input_tokens):
            if i == 0:
                encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy()).to(
                    device=self.device, dtype=self.decoder.dtype
                )
                # The first chunk has no previous context.
                encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                encoder_continuous_mask = ones

            encoder_continuous_inputs = self.scale_features(
                encoder_continuous_inputs, output_range=[-1.0, 1.0], clip=True
            )

            encodings_and_masks = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device),
                continuous_inputs=encoder_continuous_inputs,
                continuous_mask=encoder_continuous_mask,
            )

            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            x = randn_tensor(
                shape=encoder_continuous_inputs.shape,
                generator=generator,
                device=self.device,
                dtype=self.decoder.dtype,
            )

            # set step values
            self.scheduler.set_timesteps(num_inference_steps)

            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
                output = self.decode(
                    encodings_and_masks=encodings_and_masks,
                    input_tokens=x,
                    noise_time=t / self.scheduler.config.num_train_timesteps,
                )

                # Compute previous output: x_t -> x_t-1
                x = self.scheduler.step(output, t, x, generator=generator).prev_sample

            mel = self.scale_to_features(x, input_range=[-1.0, 1.0])
            encoder_continuous_inputs = mel[:1]
            pred_mel = mel.cpu().float().numpy()

            full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]], axis=1)

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, full_pred_mel)

            logger.info('Generated segment', i)

        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                'Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.'
            )
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                'Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.'
            )

        if output_type == "numpy":
            output = self.melgan(input_features=full_pred_mel.astype(np.float32))
        else:
            output = full_pred_mel

        if not return_dict:
            return (output,)

        return AudioPipelineOutput(audios=output)
| 712
|
"""simple docstring"""
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, 'tf_padding'))
        self.parent.assertTrue(hasattr(config, 'depth_multiplier'))
class MobileNetVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        tf_padding=True,
        hidden_act="relu6",
        last_hidden_size=1280,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier)
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels
    def get_config(self):
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        self.parent.assertEqual(
            result.pooler_output.shape,
            (self.batch_size, self.last_hidden_size),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": MobileNetVaModel,
"""image-classification""": MobileNetVaForImageClassification,
"""image-segmentation""": MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = MobileNetVaModelTester(self)
        self.config_tester = MobileNetVaConfigTester(self, config_class=MobileNetVaConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='MobileNetV2 does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='MobileNetV2 does not support input and output embeddings')
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason='MobileNetV2 does not output attentions')
    def test_attention_outputs(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 16
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class MobileNetVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetVaImageProcessor.from_pretrained('google/mobilenet_v2_1.0_224') if is_vision_available() else None
        )
    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetVaForImageClassification.from_pretrained('google/mobilenet_v2_1.0_224').to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.2445, -1.1993, 0.1905]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileNetVaForSemanticSegmentation.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513')
        model = model.to(torch_device)

        image_processor = MobileNetVaImageProcessor.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513')

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 65, 65))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
                [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
                [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
| 349
| 0
|
def pancake_sort(arr: list) -> list:
    """Sort a list using the pancake sort algorithm (prefix reversals only)."""
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse whole list
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr
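# Trace sketch (illustrative): each pass flips the current maximum to the
# front, then flips it to the end of the unsorted prefix.
# >>> pancake_sort([3, 1, 2])
# [1, 2, 3]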
if __name__ == "__main__":
snake_case = input("""Enter numbers separated by a comma:\n""").strip()
snake_case = [int(item) for item in user_input.split(""",""")]
print(pancake_sort(unsorted))
| 67
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
'''configuration_gpt_neox_japanese''': ['''GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXJapaneseConfig'''],
'''tokenization_gpt_neox_japanese''': ['''GPTNeoXJapaneseTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
'''GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXJapaneseForCausalLM''',
'''GPTNeoXJapaneseLayer''',
'''GPTNeoXJapaneseModel''',
'''GPTNeoXJapanesePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 674
| 0
|
'''simple docstring'''
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config
def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = './model_checkpoints/vqgan_only.yaml'
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = './model_checkpoints/vqgan_only.pt'
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd['state_dict']
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model
def reconstruct_with_vqgan(x, model):
    z, _, _ = model.encode(x)
    print(f"""VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}""")
    xrec = model.decode(z)
    return xrec
def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit('.', 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)
def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError('Expected key `target` to instantiate.')
    return get_obj_from_str(config['target'])(**config.get('params', {}))
def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}
def load_model(config, ckpt, gpu, eval_mode):
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location='cpu')
        global_step = pl_sd['global_step']
        print(f"""loaded model from global step {global_step}.""")
    else:
        pl_sd = {'state_dict': None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd['state_dict'], gpu=gpu, eval_mode=eval_mode)['model']
    return model, global_step
| 454
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"""tokenization_bertweet""": ["""BertweetTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_bertweet import BertweetTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 454
| 1
|
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=8,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=5,
        num_attention_heads=2,
        intermediate_size=36,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range,
        )
    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def snake_case__( self : str ) ->List[str]:
snake_case_ = self.prepare_config_and_inputs()
(
(
snake_case_
), (
snake_case_
), (
snake_case_
), (
snake_case_
), (
snake_case_
), (
snake_case_
), (
snake_case_
),
) = config_and_inputs
snake_case_ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )

    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()

    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=3_7)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs(self):
        return
@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(2_5_6).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 2_5_6, 7_6_8))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(2_5_6).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 5_0_2_6_5
        expected_shape = torch.Size((1, 2_5_6, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4_0_9_6).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 5_0_2_6_5
        expected_shape = torch.Size((1, 4_0_9_6, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
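

# A minimal end-to-end sketch (not part of the test suite) of the checkpoint the
# integration tests above exercise. That `AutoTokenizer` resolves a tokenizer for
# this repo is an assumption; the model class is the one imported for the tests.
#
#     import torch
#     from transformers import AutoTokenizer, MraModel
#
#     tokenizer = AutoTokenizer.from_pretrained("uw-madison/mra-base-512-4")
#     model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
#     with torch.no_grad():
#         hidden = model(**tokenizer("hello world", return_tensors="pt")).last_hidden_state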
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
RANDOM_BERT = "hf-internal-testing/tiny-random-bert"
CACHE_DIR = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
FULL_COMMIT_HASH = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"
class GetFromCacheTests(unittest.TestCase):
    def test_cached_file(self):
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(CACHE_DIR))
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(CACHE_DIR, subfolder)))
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", main_commit, CONFIG_NAME))
        self.assertTrue(os.path.isfile(archive_file))

        # File is cached at the same place the second time.
        new_archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        self.assertEqual(archive_file, new_archive_file)

        # Using a specific revision to test the full commit hash.
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME, revision="9b8c223")
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", FULL_COMMIT_HASH, CONFIG_NAME))

    def test_cached_file_errors(self):
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            _ = cached_file("tiny-random-bert", CONFIG_NAME)
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            _ = cached_file(RANDOM_BERT, CONFIG_NAME, revision="aaaa")
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")

    def test_non_existence_is_cached(self):
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR, ".no_exist", main_commit, "conf")))

        path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        path = cached_file(RANDOM_BERT, "conf", local_files_only=True, _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        response_mock = mock.Mock()
        response_mock.status_code = 5_0_0
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_connection_errors=False)
            self.assertIsNone(path)
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_has_file(self):
        self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only", WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", TF2_WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", FLAX_WEIGHTS_NAME))

    def test_get_file_from_repo_distant(self):
        # `get_file_from_repo` returns None if the file does not exist.
        self.assertIsNone(get_file_from_repo("bert-base-cased", "ahah.txt"))

        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            get_file_from_repo("bert-base-case", CONFIG_NAME)

        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            get_file_from_repo("bert-base-cased", CONFIG_NAME, revision="ahaha")

        resolved_file = get_file_from_repo("bert-base-cased", CONFIG_NAME)
        # The name is the cached name which is not very easy to test, so instead we load the content.
        config = json.loads(open(resolved_file, "r").read())
        self.assertEqual(config["hidden_size"], 7_6_8)

    def test_get_file_from_repo_local(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            filename = Path(tmp_dir) / "a.txt"
            filename.touch()
            self.assertEqual(get_file_from_repo(tmp_dir, "a.txt"), str(filename))
            self.assertIsNone(get_file_from_repo(tmp_dir, "b.txt"))
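

# A minimal usage sketch (outside the tests) of the two cache helpers exercised
# above; the repo id and filenames are illustrative:
#
#     from transformers.utils import cached_file, get_file_from_repo
#
#     config_path = cached_file("bert-base-cased", "config.json")     # downloads once, then hits the cache
#     maybe_path = get_file_from_repo("bert-base-cased", "ahah.txt")  # returns None instead of raising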
def ugly_numbers(n: int) -> int:
    """
    Returns the nth "ugly number" -- a positive integer whose only prime factors
    are 2, 3 and 5 -- by keeping three merge pointers into the list built so far.

    >>> ugly_numbers(1)
    1
    >>> ugly_numbers(10)
    12
    """
    ugly_nums = [1]

    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f'{ugly_numbers(200) = }')
from __future__ import annotations
from typing import Any
class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0):
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]

    def __str__(self):
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"

        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector: list[float]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s
def __repr__( self : List[str] ):
return str(self )
    def validate_indices(self, loc: tuple[int, int]):
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]):
        assert self.validate_indices(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float):
        assert self.validate_indices(loc)
        self.array[loc[0]][loc[1]] = value
    def __add__(self, another: Matrix):
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column

        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self):
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix):
        return self + (-another)

    def __mul__(self, another: int | float | Matrix):
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)
    def transpose(self):
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: Matrix, v: Matrix):
        # Sherman-Morrison formula, with `self` playing the role of A^(-1):
        #   (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u)
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vectors
        assert u.column == v.column == 1  # u, v should be column vectors

        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # it's not invertible
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
    def test1() -> None:
        # a^(-1) is the 3x3 identity matrix here
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test1()
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
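
# The heart of the "never OOM" behaviour showcased below is, schematically
# (a sketch only, not part of this script):
#
#     @find_executable_batch_size(starting_batch_size=128)
#     def inner_training_loop(batch_size):
#         ...  # build dataloaders with `batch_size` and train
#
#     inner_training_loop()  # called with no argument; the decorator supplies
#                            # `batch_size` and retries with a smaller value on OOM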
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """
    Creates a set of `DataLoader`s for the `glue` dataset, using "bert-base-cased" as the tokenizer.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}
class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2_560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4
class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
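

# A minimal usage sketch (identifiers as defined above; not part of the module):
#
#     configuration = EfficientNetConfig()   # EfficientNet-B7-style defaults
#     configuration.image_size               # -> 600
#     configuration.num_hidden_layers        # -> sum(num_block_repeats) * 4 == 64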
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('>=', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
logger = get_logger(__name__)
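
# The helpers below cover the three FSDP state-dict layouts:
#   - FULL_STATE_DICT: the full (unsharded) state dict, written as a single
#     `.bin` file by rank 0 only;
#   - LOCAL_STATE_DICT: one `.bin` file per rank, suffixed with the process index;
#   - SHARDED_STATE_DICT: a checkpoint directory written/read through
#     `torch.distributed.checkpoint` (`dist_cp`).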
def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            output_model_file = os.path.join(output_dir, weights_name)
            if accelerator.process_index == 0:
                logger.info(f"Saving model to {output_model_file}")
                torch.save(state_dict, output_model_file)
                logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            output_model_file = os.path.join(output_dir, weights_name)
            logger.info(f"Saving model to {output_model_file}")
            torch.save(state_dict, output_model_file)
            logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir, f"{MODEL_NAME}_{model_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving model to {ckpt_dir}")
            state_dict = {"model": state_dict}
            dist_cp.save_state_dict(
                state_dict=state_dict,
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Model saved to {ckpt_dir}")
def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        "Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
                        "initializing FSDP object"
                    )
                return
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir, f"{MODEL_NAME}_{model_index}")
                if f"{MODEL_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading model from {ckpt_dir}")
            state_dict = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict,
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
                planner=DefaultLoadPlanner(),
            )
            state_dict = state_dict["model"]
            logger.info(f"Model loaded from {ckpt_dir}")
        model.load_state_dict(state_dict)
def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        optim_state = FSDP.optim_state_dict(model, optimizer)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name)
                logger.info(f"Saving Optimizer state to {output_optimizer_file}")
                torch.save(optim_state, output_optimizer_file)
                logger.info(f"Optimizer state saved in {output_optimizer_file}")
        else:
            ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving Optimizer state to {ckpt_dir}")
            dist_cp.save_state_dict(
                state_dict={"optimizer": optim_state},
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Optimizer state saved in {ckpt_dir}")
def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # below check should work but currently it isn't working (mostly a pytorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
            )
            input_optimizer_file = os.path.join(input_dir, optimizer_name)
            logger.info(f"Loading Optimizer state from {input_optimizer_file}")
            optim_state = torch.load(input_optimizer_file)
            logger.info(f"Optimizer state loaded from {input_optimizer_file}")
        else:
            ckpt_dir = (
                os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
                if f"{OPTIMIZER_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading Optimizer from {ckpt_dir}")
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(),
                optimizer_key="optimizer",
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
            )
            optim_state = optim_state["optimizer"]
            logger.info(f"Optimizer loaded from {ckpt_dir}")
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state, model, optimizer)
        optimizer.load_state_dict(flattened_osd)
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    config = DPTConfig(embedding_type="hybrid")

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = "project"

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.readout_type = "project"
        config.num_labels = 150
        config.patch_size = 16
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    # e.g. "pretrained.model.blocks.0.attn.proj.weight"
    #   -> "dpt.encoder.layer.0.attention.output.dense.weight"
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name and "backbone" not in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name and "backbone" not in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    if "backbone" in name:
        name = name.replace("backbone", "backbone.bit.encoder")
    if ".." in name:
        name = name.replace("..", ".")
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "convolution" in name and "backbone" in name:
        name = name.replace("convolution", "conv")
    if "layer" in name and "backbone" in name:
        name = name.replace("layer", "layers")
    if "backbone.bit.encoder.bit" in name:
        name = name.replace("backbone.bit.encoder.bit", "backbone.bit")
    if "embedder.conv" in name:
        name = name.replace("embedder.conv", "embedder.convolution")
    if "backbone.bit.encoder.stem.norm" in name:
        name = name.replace("backbone.bit.encoder.stem.norm", "backbone.bit.embedder.norm")
    return name
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name, show_prediction):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)

    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1),
                size=(image.size[1], image.size[0]),
                mode="bicubic",
                align_corners=False,
            )
            .squeeze()
            .cpu()
            .numpy()
        )
        Image.fromarray((prediction / prediction.max()) * 255).show()

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("ybelkada/dpt-hybrid-midas")
        image_processor.push_to_hub("ybelkada/dpt-hybrid-midas")
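

# Example invocation (a sketch; the script's own filename is not shown here).
# Note that `--checkpoint_url` must point at a locally available .pt file, since
# the function above calls `torch.load` on it directly:
#
#     python <this_script>.py \
#         --checkpoint_url /path/to/dpt_hybrid-midas.pt \
#         --pytorch_dump_folder_path ./dpt-converted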
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
parser.add_argument(
'--show_prediction',
action='store_true',
)
    args = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class TvltProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ZinengTang/tvlt-base"
        self.tmpdirname = tempfile.mkdtemp()

    def get_image_processor(self, **kwargs):
        return TvltImageProcessor.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = TvltProcessor.from_pretrained(self.tmpdirname)

        self.assertIsInstance(processor.feature_extractor, TvltFeatureExtractor)
        self.assertIsInstance(processor.image_processor, TvltImageProcessor)

    def test_feature_extractor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([1_20_00])

        audio_dict = feature_extractor(audio, return_tensors="np")
        input_processor = processor(audio=audio, return_tensors="np")

        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        images = np.ones([3, 2_24, 2_24])

        image_dict = image_processor(images, return_tensors="np")
        input_processor = processor(images=images, return_tensors="np")

        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([1_20_00])
        images = np.ones([3, 2_24, 2_24])

        inputs = processor(audio=audio, images=images)

        self.assertListEqual(list(inputs.keys()), ["audio_values", "audio_mask", "pixel_values", "pixel_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names,
            image_processor.model_input_names + feature_extractor.model_input_names,
            msg="`processor` and `image_processor`+`feature_extractor` model input names do not match",
        )
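

# A minimal usage sketch (outside the tests) of the processor under test; the
# checkpoint is the one used in `setUp` above:
#
#     import numpy as np
#     from transformers import TvltProcessor
#
#     processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")
#     inputs = processor(audio=np.ones([12000]), images=np.ones([3, 224, 224]))
#     list(inputs.keys())  # audio_values, audio_mask, pixel_values, pixel_mask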
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
    }
}


class CpmTokenizer(PreTrainedTokenizer):
    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation."
            )
        self.jieba = jieba
        self.translator = str.maketrans(" \n", "\u2582\u2583")
    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs
    def _tokenize(self, text: str) -> List[str]:
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces
def _convert_token_to_id(self, token: str) -> int:
    return self.sp_model.PieceToId(token)

def _convert_id_to_token(self, index: int) -> str:
    return self.sp_model.IdToPiece(index)

def convert_tokens_to_string(self, tokens: List[str]) -> str:
    out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
    return out_string
def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
    sep = [self.sep_token_id]
    cls = [self.cls_token_id]
    if token_ids_1 is None:
        return token_ids_0 + sep + cls
    return token_ids_0 + sep + token_ids_1 + sep + cls

def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
    if already_has_special_tokens:
        return super().get_special_tokens_mask(
            token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True )
    if token_ids_1 is not None:
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
    return ([0] * len(token_ids_0)) + [1, 1]

def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
    sep = [self.sep_token_id]
    cls_segment_id = [2]
    if token_ids_1 is None:
        return len(token_ids_0 + sep) * [0] + cls_segment_id
    return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
    if not os.path.isdir(save_directory):
        logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
        return
    out_vocab_file = os.path.join(
        save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
    if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
        copyfile(self.vocab_file, out_vocab_file)
    elif not os.path.isfile(self.vocab_file):
        with open(out_vocab_file, "wb") as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)
    return (out_vocab_file,)

def _decode(self, *args, **kwargs):
    text = super()._decode(*args, **kwargs)
    text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
    return text
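# A minimal usage sketch for the tokenizer methods above (the class's own error
# message names it CpmTokenizer; its class header is defined earlier in the
# file). "spiece.model" is a hypothetical local SentencePiece file. The jieba
# handle and the " "/"\n" -> "\u2582"/"\u2583" translator built in __init__
# support CPM-style Chinese pre-segmentation, and _decode reverses the mapping.
#
#   tokenizer = CpmTokenizer("spiece.model")        # hypothetical vocab path
#   pieces = tokenizer._tokenize("今天天气真好")
#   ids = [tokenizer._convert_token_to_id(p) for p in pieces]
#   print(tokenizer.convert_tokens_to_string(pieces))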
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class SCREAMING_SNAKE_CASE ( ProcessorMixin ):
    """simple docstring"""
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"
def __init__(self, image_processor, tokenizer):
    super().__init__(image_processor, tokenizer)
    self.current_processor = self.image_processor

def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
    if text is None and images is None:
        raise ValueError("You have to specify either text or images. Both cannot be none.")
    if text is not None:
        encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
    if images is not None:
        image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
    if text is not None and images is not None:
        encoding["pixel_values"] = image_features.pixel_values
        return encoding
    elif text is not None:
        return encoding
    else:
        return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
def batch_decode(self, *args, **kwargs):
    return self.tokenizer.batch_decode(*args, **kwargs)

def decode(self, *args, **kwargs):
    return self.tokenizer.decode(*args, **kwargs)

@property
def model_input_names(self):
    return ["input_ids", "attention_mask", "pixel_values"]
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class SCREAMING_SNAKE_CASE ( ProcessorMixin ):
    """simple docstring"""
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__(self, image_processor=None, tokenizer=None, **kwargs):
    feature_extractor = None
    if "feature_extractor" in kwargs:
        warnings.warn(
            "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
            " instead.",
            FutureWarning,
        )
        feature_extractor = kwargs.pop("feature_extractor")
    image_processor = image_processor if image_processor is not None else feature_extractor
    if image_processor is None:
        raise ValueError("You need to specify an `image_processor`.")
    if tokenizer is None:
        raise ValueError("You need to specify a `tokenizer`.")
    super().__init__(image_processor, tokenizer)
def __call__(self, text=None, visual_prompt=None, images=None, return_tensors=None, **kwargs):
    if text is None and visual_prompt is None and images is None:
        raise ValueError("You have to specify either text, visual prompt or images.")
    if text is not None and visual_prompt is not None:
        raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")
    if text is not None:
        encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
    if visual_prompt is not None:
        prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)
    if images is not None:
        image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
    if visual_prompt is not None and images is not None:
        encoding = {
            "pixel_values": image_features.pixel_values,
            "conditional_pixel_values": prompt_features.pixel_values,
        }
        return encoding
    elif text is not None and images is not None:
        encoding["pixel_values"] = image_features.pixel_values
        return encoding
    elif text is not None:
        return encoding
    elif visual_prompt is not None:
        encoding = {
            "conditional_pixel_values": prompt_features.pixel_values,
        }
        return encoding
    else:
        return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
def batch_decode(self, *args, **kwargs):
    return self.tokenizer.batch_decode(*args, **kwargs)

def decode(self, *args, **kwargs):
    return self.tokenizer.decode(*args, **kwargs)

@property
def feature_extractor_class(self):
    warnings.warn(
        "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
        FutureWarning,
    )
    return self.image_processor_class

@property
def feature_extractor(self):
    warnings.warn(
        "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
        FutureWarning,
    )
    return self.image_processor
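# Hedged sketch of the three prompt modes supported by __call__ above: a text
# prompt, a visual prompt, or plain images, but never text together with a
# visual prompt. The checkpoint id and PIL images below are hypothetical.
#
#   processor = SCREAMING_SNAKE_CASE.from_pretrained("some/clipseg-checkpoint")
#   out = processor(text=["a glass"], images=[image], return_tensors="pt")
#   # -> input_ids / attention_mask / pixel_values
#   out = processor(visual_prompt=[prompt_image], images=[image], return_tensors="pt")
#   # -> pixel_values / conditional_pixel_values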
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
class __a (PipelineTool):
    '''simple docstring'''
    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
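# A minimal sketch of how PipelineTool drives the class above: calling the tool
# chains encode (tokenize), forward (generate token ids) and decode
# (detokenize). The dialogue string below is made up for illustration.
#
#   tool = __a()
#   print(tool("Sam: hey, can you pick me up at 6? Alex: sure, see you then."))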
"""simple docstring"""
def climb_stairs(number_of_steps: int) -> int:
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f'''number_of_steps needs to be positive integer, your input {number_of_steps}'''
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current
if __name__ == "__main__":
import doctest
doctest.testmod()
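# Worked check (a quick sketch): with steps of size 1 or 2 the recurrence is
# ways(n) = ways(n - 1) + ways(n - 2), i.e. the Fibonacci sequence shifted by
# one: 1, 2, 3, 5, 8, ...
if __name__ == "__main__":
    assert [climb_stairs(n) for n in range(1, 6)] == [1, 2, 3, 5, 8]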
"""simple docstring"""
from __future__ import annotations
from random import choice
def random_pivot(lst):
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    pivot = random_pivot(lst)
    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]
    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)
if __name__ == "__main__":
import doctest
doctest.testmod()
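# Usage check (a quick sketch): kth_number is an expected-linear-time
# quickselect; note it assumes distinct elements, since values equal to the
# pivot are dropped by the strict < / > partitions.
if __name__ == "__main__":
    assert kth_number([2, 1, 3, 4, 5], 3) == 3
    assert kth_number([25, 21, 98, 100, 76, 22, 43, 60, 89, 87], 4) == 43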
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/markuplm-base': 'https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json',
    'microsoft/markuplm-large': 'https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json',
}
class MarkupLMConfig(PretrainedConfig):
    model_type = "markuplm"

    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , bos_token_id=0 , eos_token_id=2 , max_xpath_tag_unit_embeddings=256 , max_xpath_subs_unit_embeddings=1024 , tag_pad_id=216 , subs_pad_id=1001 , xpath_unit_hidden_size=32 , max_depth=50 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ) -> None:
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs , )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
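# A quick sketch: the defaults above mirror microsoft/markuplm-base, so a bare
# instance is usable directly; the xpath-related sizes configure the embedded
# DOM-path (xpath) features.
if __name__ == "__main__":
    config = MarkupLMConfig()
    assert config.max_depth == 50 and config.xpath_unit_hidden_size == 32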
"""simple docstring"""
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n, prec=1000):
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2
        exp += 1
    # n - 1 = d * (2 ** exp)
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True


if __name__ == "__main__":
    n = abs(int(input('Enter bound : ').strip()))
    print('Here\'s the list of primes:')
    print(', '.join(str(i) for i in range(n + 1) if is_prime_big(i)))
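# Quick sanity check (a sketch): the test is probabilistic, but with prec
# rounds the error probability for a composite n is at most 4 ** -prec.
if __name__ == "__main__":
    assert is_prime_big(97) and not is_prime_big(91)  # 91 = 7 * 13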
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_focalnet': ['FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FocalNetConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_focalnet'] = [
        'FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST',
        'FocalNetForImageClassification',
        'FocalNetForMaskedImageModeling',
        'FocalNetBackbone',
        'FocalNetModel',
        'FocalNetPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import argparse
import torch
from transformers import (
    SpeechT5Config,
    SpeechT5FeatureExtractor,
    SpeechT5ForSpeechToSpeech,
    SpeechT5ForSpeechToText,
    SpeechT5ForTextToSpeech,
    SpeechT5Processor,
    SpeechT5Tokenizer,
    logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
MAPPING_SPEECH_ENCODER_PRENET = {
"speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm",
"speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection",
"speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv",
"speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed",
}
MAPPING_TEXT_ENCODER_PRENET = {
"text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens",
"text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha",
}
MAPPING_SPEECH_DECODER_PRENET = {
"speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0",
"speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1",
"speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer",
"speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha",
"speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer",
}
MAPPING_SPEECH_DECODER_POSTNET = {
"speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out",
"speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out",
"speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv",
"speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm",
"speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv",
"speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm",
"speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv",
"speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm",
"speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv",
"speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm",
"speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv",
"speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm",
}
MAPPING_TEXT_DECODER_PRENET = {
"text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens",
}
MAPPING_TEXT_DECODER_POSTNET = {
"text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head",
}
MAPPING_ENCODER = {
"encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj",
"encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj",
"encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj",
"encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj",
"encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm",
"encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense",
"encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense",
"encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm",
"encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k",
}
MAPPING_DECODER = {
"decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj",
"decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj",
"decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj",
"decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj",
"decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm",
"decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj",
"decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj",
"decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj",
"decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj",
"decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm",
"decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense",
"decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense",
"decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm",
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
"encoder.version",
"encoder.layers.*.norm_k.weight",
"encoder.layers.*.norm_k.bias",
"decoder.version",
"decoder.layers.*.norm_k.weight",
"decoder.layers.*.norm_k.bias",
"decoder.pos_emb.pe_k",
"speech_encoder_prenet.embed_positions._float_tensor",
"text_decoder_prenet.embed_positions._float_tensor",
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"speech_decoder_prenet.*",
"speech_decoder_postnet.*",
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
"encoder.proj",
"speech_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split('''.'''):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f' {value.shape} for {full_name}' )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value
    logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.')
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith('''.*'''):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split('''.*.''')
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
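# Hedged examples of the three wildcard forms handled above: a trailing ".*"
# matches any suffix, an infix ".*." matches a prefix/suffix pair around any
# middle segment, and a plain key matches by substring.
#
#   should_ignore("text_encoder_prenet.foo", ["text_encoder_prenet.*"])            # True
#   should_ignore("encoder.layers.3.norm_k.weight",
#                 ["encoder.layers.*.norm_k.weight"])                              # True
#   should_ignore("encoder.layers.3.fc1.weight", IGNORE_KEYS)                      # False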
def recursively_load_weights(fairseq_dict, hf_model, task):
    unused_weights = []
    if task == "s2t":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f'Unsupported task: {task}')
    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f'{name} was ignored')
            continue
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_encoder, unused_weights, hf_model.config.feat_extract_norm == '''group''', )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split('''.*.''')
                    if prefix in name and suffix in name:
                        key = suffix
                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('''.''')[-2]
                        mapped_key = mapped_key.replace('''*''', layer_index)
                    if "weight_g" in name:
                        weight_type = '''weight_g'''
                    elif "weight_v" in name:
                        weight_type = '''weight_v'''
                    elif "bias" in name:
                        weight_type = '''bias'''
                    elif "weight" in name:
                        weight_type = '''weight'''
                    elif "running_mean" in name:
                        weight_type = '''running_mean'''
                    elif "running_var" in name:
                        weight_type = '''running_var'''
                    elif "num_batches_tracked" in name:
                        weight_type = '''num_batches_tracked'''
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f'Unused weights: {unused_weights}')
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split('''conv_layers.''')[-1]
    items = name.split('''.''')
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_speecht5_checkpoint(task, checkpoint_path, pytorch_dump_folder_path, config_path=None, vocab_path=None, repo_id=None, ):
    if config_path is not None:
        config = SpeechT5Config.from_pretrained(config_path)
    else:
        config = SpeechT5Config()
    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechT5ForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechT5ForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechT5ForSpeechToSpeech(config)
    else:
        raise ValueError(f'Unknown task name: {task}')
    if vocab_path:
        tokenizer = SpeechT5Tokenizer(vocab_path, model_max_length=config.max_text_positions)
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken('''<mask>''', lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({'''mask_token''': mask_token})
        tokenizer.add_tokens(['''<ctc_blank>'''])
    feature_extractor = SpeechT5FeatureExtractor()
    processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)
    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint['''model'''], model, task)
    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print('''Pushing to the hub...''')
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--task",
default="s2t",
type=str,
help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
args = parser.parse_args()
convert_speecht5_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
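# Hedged invocation sketch for the argparse interface above (the script
# filename and all paths are hypothetical):
#
#   python convert_speecht5_checkpoint.py \
#       --task t2s \
#       --checkpoint_path ./speecht5_tts.pt \
#       --vocab_path ./spm_char.model \
#       --pytorch_dump_folder_path ./speecht5_tts_hf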
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = 'gelu'
def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=20 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , attention_window=4 , ):
    self.parent = parent
    self.batch_size = batch_size
    self.seq_length = seq_length
    self.is_training = is_training
    self.use_labels = use_labels
    self.vocab_size = vocab_size
    self.hidden_size = hidden_size
    self.num_hidden_layers = num_hidden_layers
    self.num_attention_heads = num_attention_heads
    self.intermediate_size = intermediate_size
    self.hidden_dropout_prob = hidden_dropout_prob
    self.attention_probs_dropout_prob = attention_probs_dropout_prob
    self.max_position_embeddings = max_position_embeddings
    self.eos_token_id = eos_token_id
    self.pad_token_id = pad_token_id
    self.bos_token_id = bos_token_id
    self.attention_window = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
    self.key_length = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
    self.encoder_seq_length = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def prepare_config_and_inputs_for_common(self):
    input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size)
    eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size) , 1)
    input_ids = tf.concat([input_ids, eos_tensor] , axis=1)
    decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
    config = self.config_cls(
        vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
    inputs_dict = prepare_led_inputs_dict(config , input_ids , decoder_input_ids)
    global_attention_mask = tf.concat(
        [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]] , axis=-1 , )
    inputs_dict["global_attention_mask"] = global_attention_mask
    return config, inputs_dict
def check_decoder_model_past_large_inputs(self , config , inputs_dict):
    model = TFLEDModel(config=config).get_decoder()
    input_ids = inputs_dict['''input_ids''']
    input_ids = input_ids[:1, :]
    attention_mask = inputs_dict['''attention_mask'''][:1, :]
    self.batch_size = 1
    # first forward pass
    outputs = model(input_ids , attention_mask=attention_mask , use_cache=True)
    output, past_key_values = outputs.to_tuple()
    # create hypothetical next token and extent to next_input_ids
    next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size)
    next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3) , 2) , tf.int8)
    # append to next input_ids and
    next_input_ids = tf.concat([input_ids, next_tokens] , axis=-1)
    next_attention_mask = tf.concat([attention_mask, next_attn_mask] , axis=-1)
    output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask)[0]
    output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values)[0]
    self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1])
    # select random slice
    random_slice_idx = int(ids_tensor((1,) , output_from_past.shape[-1]))
    output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
    output_from_past_slice = output_from_past[:, :, random_slice_idx]
    # test that outputs are equal for slice
    tf.debugging.assert_near(output_from_no_past_slice , output_from_past_slice , rtol=1E-3)
def prepare_led_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, ):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ], axis=-1, )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
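# Hedged usage sketch: given only input_ids and decoder_input_ids, the helper
# above fills in padding-aware attention masks and all-ones head masks. The
# token values below are made up; LEDConfig's default pad_token_id is 1.
#
#   config = LEDConfig()
#   input_ids = tf.constant([[0, 31414, 232, 2, 1, 1]])
#   decoder_input_ids = tf.constant([[2, 0, 31414, 232]])
#   inputs = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
#   # inputs["attention_mask"] is 0 over the trailing pad positions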
@require_tf
class TFLEDModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            'conversational': TFLEDForConditionalGeneration,
            'feature-extraction': TFLEDModel,
            'summarization': TFLEDForConditionalGeneration,
            'text2text-generation': TFLEDForConditionalGeneration,
            'translation': TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False
def setUp(self):
    self.model_tester = TFLEDModelTester(self)
    self.config_tester = ConfigTester(self , config_class=LEDConfig)

def test_config(self):
    self.config_tester.run_common_tests()

def test_decoder_model_past_large_inputs(self):
    config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
    self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
def test_attention_outputs(self):
    config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
    inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict['''attention_mask'''])
    num_global_attn_indices = 2
    inputs_dict["global_attention_mask"] = tf.where(
        tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices , 1 , inputs_dict['''global_attention_mask'''] , )
    config.return_dict = True
    seq_length = self.model_tester.seq_length
    encoder_seq_length = self.model_tester.encoder_seq_length

    def check_decoder_attentions_output(outputs):
        decoder_attentions = outputs.decoder_attentions
        self.assertEqual(len(decoder_attentions) , self.model_tester.num_hidden_layers)
        self.assertListEqual(
            list(decoder_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )

    def check_encoder_attentions_output(outputs):
        attentions = [t.numpy() for t in outputs.encoder_attentions]
        global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
        self.assertEqual(len(attentions) , self.model_tester.num_hidden_layers)
        self.assertEqual(len(global_attentions) , self.model_tester.num_hidden_layers)
        self.assertListEqual(
            list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
        self.assertListEqual(
            list(global_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )

    for model_class in self.all_model_classes:
        inputs_dict["output_attentions"] = True
        config.output_hidden_states = False
        model = model_class(config)
        outputs = model(self._prepare_for_class(inputs_dict , model_class))
        out_len = len(outputs)
        self.assertEqual(config.output_hidden_states , False)
        check_encoder_attentions_output(outputs)
        if self.is_encoder_decoder:
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict , model_class))
            self.assertEqual(config.output_hidden_states , False)
            check_decoder_attentions_output(outputs)
        # Check that output attentions can also be changed via the config
        del inputs_dict["output_attentions"]
        config.output_attentions = True
        model = model_class(config)
        outputs = model(self._prepare_for_class(inputs_dict , model_class))
        self.assertEqual(config.output_hidden_states , False)
        check_encoder_attentions_output(outputs)
        # Check attention is always last and order is fine
        inputs_dict["output_attentions"] = True
        config.output_hidden_states = True
        model = model_class(config)
        outputs = model(self._prepare_for_class(inputs_dict , model_class))
        self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(outputs))
        self.assertEqual(model.config.output_hidden_states , True)
        check_encoder_attentions_output(outputs)
@unittest.skip('''LED keeps using potentially symbolic tensors in conditionals and breaks tracing.''')
def test_saved_model_creation(self):
    pass

def test_generate_with_headmasking(self):
    # TODO: head masking not yet implemented for LED
    pass
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
TOLERANCE = 1e-4
@slow
@require_tf
class __lowercase ( unittest.TestCase ):
def test_inference_no_head(self):
    model = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''').led
    # change to intended input here
    input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
    decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
    inputs_dict = prepare_led_inputs_dict(model.config , input_ids , decoder_input_ids)
    output = model(**inputs_dict)[0]
    expected_shape = (1, 1024, 768)
    self.assertEqual(output.shape , expected_shape)
    # change to expected output here
    expected_slice = tf.convert_to_tensor(
        [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]] , )
    tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1E-3)

def test_inference_with_head(self):
    model = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''')
    # change to intended input here
    input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
    decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
    inputs_dict = prepare_led_inputs_dict(model.config , input_ids , decoder_input_ids)
    output = model(**inputs_dict)[0]
    expected_shape = (1, 1024, model.config.vocab_size)
    self.assertEqual(output.shape , expected_shape)
    # change to expected output here
    expected_slice = tf.convert_to_tensor(
        [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]] , )
    tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1E-3 , rtol=1E-3)
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
__a = logging.getLogger(__name__)
@dataclass
class InputExample:
    guid: str
    words: List[str]
    labels: Optional[List[str]]


@dataclass
class InputFeatures:
    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None


class Split(Enum):
    train = '''train'''
    dev = '''dev'''
    test = '''test'''


class TokenClassificationTask:
    @staticmethod
    def read_examples_from_file(data_dir, mode: Union[Split, str]) -> List[InputExample]:
        raise NotImplementedError

    @staticmethod
    def get_labels(path: str) -> List[str]:
        raise NotImplementedError

    @staticmethod
    def convert_examples_to_features( examples: List[InputExample] , label_list: List[str] , max_seq_length: int , tokenizer: PreTrainedTokenizer , cls_token_at_end=False , cls_token="[CLS]" , cls_token_segment_id=1 , sep_token="[SEP]" , sep_token_extra=False , pad_on_left=False , pad_token=0 , pad_token_segment_id=0 , pad_token_label_id=-100 , sequence_a_segment_id=0 , mask_padding_with_zero=True , ) -> List[InputFeatures]:
"""simple docstring"""
label_map = {label: i for i, label in enumerate(label_list)}
features = []
for ex_index, example in enumerate(examples):
if ex_index % 1_0_0_0_0 == 0:
logger.info("""Writing example %d of %d""" , ex_index , len(examples))
tokens = []
label_ids = []
for word, label in zip(example.words , example.labels ):
word_tokens = tokenizer.tokenize(word)
# bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
if len(word_tokens) > 0:
tokens.extend(word_tokens)
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
special_tokens_count = tokenizer.num_special_tokens_to_add()
if len(tokens) > max_seq_length - special_tokens_count:
tokens = tokens[: (max_seq_length - special_tokens_count)]
label_ids = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
segment_ids = [sequence_a_segment_id] * len(tokens)
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
tokens = [cls_token] + tokens
label_ids = [pad_token_label_id] + label_ids
segment_ids = [cls_token_segment_id] + segment_ids
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
padding_length = max_seq_length - len(input_ids)
if pad_on_left:
input_ids = ([pad_token] * padding_length) + input_ids
input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
label_ids = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
assert len(label_ids) == max_seq_length
if ex_index < 5:
logger.info("""*** Example ***""" )
logger.info("""guid: %s""" , example.guid )
logger.info("""tokens: %s""" , """ """.join([str(__lowerCamelCase ) for x in tokens] ) )
logger.info("""input_ids: %s""" , """ """.join([str(__lowerCamelCase ) for x in input_ids] ) )
logger.info("""input_mask: %s""" , """ """.join([str(__lowerCamelCase ) for x in input_mask] ) )
logger.info("""segment_ids: %s""" , """ """.join([str(__lowerCamelCase ) for x in segment_ids] ) )
logger.info("""label_ids: %s""" , """ """.join([str(__lowerCamelCase ) for x in label_ids] ) )
if "token_type_ids" not in tokenizer.model_input_names:
UpperCAmelCase = None
features.append(
InputFeatures(
input_ids=__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , label_ids=__lowerCamelCase ) )
return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class TokenClassificationDataset(Dataset):
    features: List[InputFeatures]
    pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index
    # Use cross entropy ignore_index as padding label id so that only real
    # label ids contribute to the loss later.

    def __init__( self , token_classification_task: TokenClassificationTask , data_dir: str , tokenizer: PreTrainedTokenizer , labels: List[str] , model_type: str , max_seq_length: Optional[int] = None , overwrite_cache=False , mode: Split = Split.train , ):
        cached_features_file = os.path.join(
            data_dir , """cached_{}_{}_{}""".format(mode.value , tokenizer.__class__.__name__ , str(max_seq_length)) , )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + """.lock"""
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not overwrite_cache:
                logger.info(F"""Loading features from cached file {cached_features_file}""")
                self.features = torch.load(cached_features_file)
            else:
                logger.info(F"""Creating features from dataset file at {data_dir}""")
                examples = token_classification_task.read_examples_from_file(data_dir , mode)
                # TODO clean up all this to leverage built-in features of tokenizers
                self.features = token_classification_task.convert_examples_to_features(
                    examples , labels , max_seq_length , tokenizer , cls_token_at_end=bool(model_type in ["""xlnet"""]) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["""xlnet"""] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=False , pad_on_left=bool(tokenizer.padding_side == """left""") , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
                logger.info(F"""Saving features into cached file {cached_features_file}""")
                torch.save(self.features , cached_features_file)

    def __len__(self):
        return len(self.features)

    def __getitem__(self , i) -> InputFeatures:
        return self.features[i]
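# Hedged usage sketch (the NER task class, tokenizer and data_dir below are
# hypothetical; keyword names follow the __init__ signature above). The
# dataset caches InputFeatures to disk and serves them by index:
#
#   dataset = TokenClassificationDataset(
#       token_classification_task=NER(), data_dir="./data", tokenizer=tokenizer,
#       labels=["O", "B-PER", "I-PER"], model_type="bert", max_seq_length=128,
#       mode=Split.train,
#   )
#   first = dataset[0]        # an InputFeatures instance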
if is_tf_available():
import tensorflow as tf
class TFTokenClassificationDataset:
    features: List[InputFeatures]
    pad_token_label_id: int = -100
    # Use cross entropy ignore_index as padding label id so that only real
    # label ids contribute to the loss later.

    def __init__( self , token_classification_task: TokenClassificationTask , data_dir: str , tokenizer: PreTrainedTokenizer , labels: List[str] , model_type: str , max_seq_length: Optional[int] = None , overwrite_cache=False , mode: Split = Split.train , ):
        examples = token_classification_task.read_examples_from_file(data_dir , mode)
        # TODO clean up all this to leverage built-in features of tokenizers
        self.features = token_classification_task.convert_examples_to_features(
            examples , labels , max_seq_length , tokenizer , cls_token_at_end=bool(model_type in ["""xlnet"""]) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["""xlnet"""] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=False , pad_on_left=bool(tokenizer.padding_side == """left""") , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )

        def gen():
            for ex in self.features:
                if ex.token_type_ids is None:
                    yield (
                        {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
                        ex.label_ids,
                    )
                else:
                    yield (
                        {
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label_ids,
                    )

        if "token_type_ids" not in tokenizer.model_input_names:
            self.dataset = tf.data.Dataset.from_generator(
                gen , ({"""input_ids""": tf.int32, """attention_mask""": tf.int32}, tf.int64) , (
                    {"""input_ids""": tf.TensorShape([None]), """attention_mask""": tf.TensorShape([None])},
                    tf.TensorShape([None]),
                ) , )
        else:
            self.dataset = tf.data.Dataset.from_generator(
                gen , ({"""input_ids""": tf.int32, """attention_mask""": tf.int32, """token_type_ids""": tf.int32}, tf.int64) , (
                    {
                        """input_ids""": tf.TensorShape([None]),
                        """attention_mask""": tf.TensorShape([None]),
                        """token_type_ids""": tf.TensorShape([None]),
                    },
                    tf.TensorShape([None]),
                ) , )

    def get_dataset(self):
        self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))
        return self.dataset

    def __len__(self):
        return len(self.features)

    def __getitem__(self , i) -> InputFeatures:
        return self.features[i]
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("""[^A-Za-z_0-9]""")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash(tokens) -> Optional[MinHash]:
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash
def get_tokens(code) -> Set[str]:
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
    def __init__( self , * , duplication_jaccard_threshold: float = 0.85 , ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm)
        self._duplicate_clusters = defaultdict(set)

    def add(self , code_key: Tuple , min_hash: MinHash) -> None:
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(F"""Duplicate key {code_key}""")
            return
        self._index.insert(code_key , min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"""base_index""": el[0], """repo_name""": el[1], """path""": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self , filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath , """w""") as f:
            json.dump(duplicate_clusters , f)
def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["""content"""]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter(dataset_iterator):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash , ThreadedIterator(dataset_iterator , max_queue_size=1_0_0_0_0) , chunksize=1_0_0 , ):
            if data is not None:
                yield data
def make_duplicate_clusters(dataset_iterator, jaccard_threshold):
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)) , max_queue_size=1_0_0)):
        di.add(filename , min_hash)
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity(code1, code2) -> float:
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
_shared_dataset = None
def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["""base_index"""]]["""content"""]
        for element2 in extremes:
            code2 = _shared_dataset[element2["""base_index"""]]["""content"""]
            if jaccard_similarity(code1 , code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes
def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared , jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f , cluster_list , ) , total=len(cluster_list) , ):
            extremes_list.append(extremes)
    return extremes_list
def deduplicate_dataset ( dataset , jaccard_threshold = 0.85 ) ->Tuple[Type[Dataset], List[List[Dict]]]:
    duplicate_clusters = make_duplicate_clusters(dataset , jaccard_threshold )
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters , dataset , jaccard_threshold )
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys() )
    ds_filter = dataset.filter(lambda x , idx : idx not in remove_indices , with_indices=True )
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]
    print(F"""Original dataset size: {len(dataset )}""" )
    print(F"""Number of duplicate clusters: {len(duplicate_clusters )}""" )
    print(F"""Files in duplicate cluster: {len(duplicate_indices )}""" )
    print(F"""Unique files in duplicate cluster: {len(extreme_dict )}""" )
    print(F"""Filtered dataset size: {len(ds_filter )}""" )
    return ds_filter, duplicate_clusters
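# Hypothetical end-to-end usage of the pipeline above; the dataset name is
# illustrative, any `datasets.Dataset` with a "content" column would work:
#
#   from datasets import load_dataset
#   ds = load_dataset("codeparrot/codeparrot-clean-valid", split="train")
#   ds_dedup, duplicate_clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)
#   print(f"kept {len(ds_dedup)} of {len(ds)} files")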
| 377
| 1
|
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_snake_case : Any = logging.get_logger(__name__)
_snake_case : Optional[Any] = {'vocab_file': 'vocab.json'}
_snake_case : Any = {
'vocab_file': {
'mgp-str': 'https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json',
}
}
_snake_case : Optional[int] = {'mgp-str': 27}
class MgpstrTokenizer ( PreTrainedTokenizer ):
SCREAMING_SNAKE_CASE__ =VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ =PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self, _a, _a="[GO]", _a="[GO]", _a="[s]", _a="[GO]", **_a ) -> Dict:
super().__init__(
unk_token=_a, bos_token=_a, eos_token=_a, pad_token=_a, **_a, )
with open(_a, encoding="utf-8" ) as vocab_handle:
__SCREAMING_SNAKE_CASE = json.load(_a )
__SCREAMING_SNAKE_CASE = {v: k for k, v in self.vocab.items()}
    @property
    def vocab_size( self ):
        return len(self.vocab )
    def get_vocab( self ):
        return dict(self.vocab, **self.added_tokens_encoder )
    def _tokenize( self, text ):
        char_tokens = []
        for s in text:
            char_tokens.extend(s )
        return char_tokens
    def _convert_token_to_id( self, token ):
        return self.vocab.get(token, self.vocab.get(self.unk_token ) )
    def _convert_id_to_token( self, index ):
        return self.decoder.get(index )
    def save_vocabulary( self, save_directory, filename_prefix = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory ) )
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        with open(vocab_file, "w", encoding="utf-8" ) as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False ) + "\n" )
return (vocab_file,)
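# Illustrative round trip with the character-level tokenizer above; the vocab
# path is a placeholder for any JSON char-to-id mapping on disk:
#
#   tokenizer = MgpstrTokenizer(vocab_file="vocab.json")
#   ids = tokenizer("ticket")["input_ids"]
#   print(tokenizer.convert_ids_to_tokens(ids))  # ['t', 'i', 'c', 'k', 'e', 't']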
| 214
|
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def test_patch_submodule ( ) -> Optional[int]:
"""simple docstring"""
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
__SCREAMING_SNAKE_CASE = "__test_patch_submodule_mock__"
with patch_submodule(_test_patching , "os.path.join" , __snake_case ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def test_patch_submodule_builtin ( ) -> Any:
"""simple docstring"""
assert _test_patching.open is open
__SCREAMING_SNAKE_CASE = "__test_patch_submodule_builtin_mock__"
# _test_patching has "open" in its globals
assert _test_patching.open is open
with patch_submodule(_test_patching , "open" , __snake_case ):
assert _test_patching.open is mock
# check that everthing is back to normal when the patch is over
assert _test_patching.open is open
def test_patch_submodule_missing ( ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = "__test_patch_submodule_missing_mock__"
with patch_submodule(_test_patching , "pandas.read_csv" , __snake_case ):
pass
def test_patch_submodule_missing_builtin ( ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = "__test_patch_submodule_missing_builtin_mock__"
# _test_patching doesn't have "len" in its globals
assert getattr(_test_patching , "len" , __snake_case ) is None
with patch_submodule(_test_patching , "len" , __snake_case ):
assert _test_patching.len is mock
assert _test_patching.len is len
def test_patch_submodule_start_and_stop ( ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = "__test_patch_submodule_start_and_stop_mock__"
__SCREAMING_SNAKE_CASE = patch_submodule(_test_patching , "open" , __snake_case )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def test_patch_submodule_successive ( ) -> str:
"""simple docstring"""
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
__SCREAMING_SNAKE_CASE = "__test_patch_submodule_successive_join__"
__SCREAMING_SNAKE_CASE = "__test_patch_submodule_successive_dirname__"
__SCREAMING_SNAKE_CASE = "__test_patch_submodule_successive_rename__"
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching , "os.path.join" , __snake_case ):
with patch_submodule(_test_patching , "os.rename" , __snake_case ):
with patch_submodule(_test_patching , "os.path.dirname" , __snake_case ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching , "os.rename" , __snake_case ):
with patch_submodule(_test_patching , "os.path.join" , __snake_case ):
with patch_submodule(_test_patching , "os.path.dirname" , __snake_case ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def test_patch_submodule_doesnt_exist ( ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = "__test_patch_submodule_doesnt_exist_mock__"
with patch_submodule(_test_patching , "__module_that_doesn_exist__.__attribute_that_doesn_exist__" , __snake_case ):
pass
with patch_submodule(_test_patching , "os.__attribute_that_doesn_exist__" , __snake_case ):
pass
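# A minimal sketch of the pattern all of these tests exercise: patch_submodule
# temporarily rebinds a dotted attribute path inside the target module and
# restores the original object on exit (the replacement value is arbitrary):
#
#   with patch_submodule(_test_patching, "os.path.join", lambda *parts: "patched"):
#       assert _test_patching.os.path.join("a", "b") == "patched"
#   assert _test_patching.os.path.join("a", "b") == os.path.join("a", "b")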
| 214
| 1
|
"""simple docstring"""
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
def get_maskformer_config ( model_name: str ):
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224" , out_features=["stage1", "stage2", "stage3", "stage4"] )
    config = MaskFormerConfig(backbone_config=backbone_config )
    repo_id = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 8_4_7
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 1_5_0
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 1_7_1
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 1_3_3
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 1_9
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 6_5
        filename = "mapillary-vistas-id2label.json"
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    return config
def create_rename_keys ( config ):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.norm1.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.norm1.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.attn.relative_position_index", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.attn.proj.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.attn.proj.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.norm2.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.norm2.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.mlp.fc1.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.mlp.fc1.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.mlp.fc2.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.mlp.fc2.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias") )
if i < 3:
rename_keys.append((F"backbone.layers.{i}.downsample.reduction.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight") )
rename_keys.append((F"backbone.layers.{i}.downsample.norm.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight") )
rename_keys.append((F"backbone.layers.{i}.downsample.norm.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias") )
rename_keys.append((F"backbone.norm{i}.weight", F"model.pixel_level_module.encoder.hidden_states_norms.{i}.weight") )
rename_keys.append((F"backbone.norm{i}.bias", F"model.pixel_level_module.encoder.hidden_states_norms.{i}.bias") )
# FPN
rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F"sem_seg_head.adapter_{source_index}.weight", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight") )
rename_keys.append((F"sem_seg_head.adapter_{source_index}.norm.weight", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight") )
rename_keys.append((F"sem_seg_head.adapter_{source_index}.norm.bias", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias") )
rename_keys.append((F"sem_seg_head.layer_{source_index}.weight", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight") )
rename_keys.append((F"sem_seg_head.layer_{source_index}.norm.weight", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight") )
rename_keys.append((F"sem_seg_head.layer_{source_index}.norm.bias", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias") )
rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight") )
rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight", F"model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias", F"model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias") )
# cross-attention out projection
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight", F"model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias", F"model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias") )
# MLP 1
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight", F"model.transformer_module.decoder.layers.{idx}.fc1.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias", F"model.transformer_module.decoder.layers.{idx}.fc1.bias") )
# MLP 2
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight", F"model.transformer_module.decoder.layers.{idx}.fc2.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias", F"model.transformer_module.decoder.layers.{idx}.fc2.bias") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight", F"model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias", F"model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight", F"model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias", F"model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias") )
# layernorm 3 (final layernorm)
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight", F"model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias", F"model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias") )
# heads on top
rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias") )
rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight") )
rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias") )
for i in range(3 ):
rename_keys.append((F"sem_seg_head.predictor.mask_embed.layers.{i}.weight", F"mask_embedder.{i}.0.weight") )
rename_keys.append((F"sem_seg_head.predictor.mask_embed.layers.{i}.bias", F"mask_embedder.{i}.0.bias") )
# fmt: on
return rename_keys
def rename_key ( dct , old , new ) -> None:
    val = dct.pop(old )
    dct[new] = val
def read_in_swin_q_k_v ( state_dict , backbone_config ):
    num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        dim = num_features[i]
        for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(F"backbone.layers.{i}.blocks.{j}.attn.qkv.weight" )
            in_proj_bias = state_dict.pop(F"backbone.layers.{i}.blocks.{j}.attn.qkv.bias" )
            # next, add query, keys and values (in that order) to the state dict
            state_dict[F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[: dim]
            state_dict[F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim :, :]
            state_dict[F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim :]
            # fmt: on
def read_in_decoder_q_k_v ( state_dict , config ):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers ):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight" )
        in_proj_bias = state_dict.pop(F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[: hidden_size, :]
        state_dict[F"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[: hidden_size]
        state_dict[F"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[F"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[F"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size :, :]
        state_dict[F"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size :]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight" )
        in_proj_bias = state_dict.pop(F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight[: hidden_size, :]
        state_dict[F"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias[: hidden_size]
        state_dict[F"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[F"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[F"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight[-hidden_size :, :]
        state_dict[F"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias[-hidden_size :]
    # fmt: on
def prepare_img ( ):
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_maskformer_checkpoint ( model_name: str , checkpoint_path: str , pytorch_dump_folder_path: str , push_to_hub: bool = False ):
    config = get_maskformer_config(model_name )
    # load original state_dict
    with open(checkpoint_path , "rb" ) as f:
        data = pickle.load(f )
    state_dict = data["model"]
    # for name, param in state_dict.items():
    #     print(name, param.shape)
    # rename keys
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_swin_q_k_v(state_dict , config.backbone_config )
    read_in_decoder_q_k_v(state_dict , config )
    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value )
    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config )
    model.eval()
    for name, param in model.named_parameters():
        print(name , param.shape )
    missing_keys , unexpected_keys = model.load_state_dict(state_dict , strict=False )
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys ) == 0, F"Unexpected keys: {unexpected_keys}"
    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 6_5
    elif "cityscapes" in model_name:
        ignore_index = 6_5_5_3_5
    else:
        ignore_index = 2_5_5
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index , reduce_labels=reduce_labels )
    inputs = image_processor(image , return_tensors="pt" )
    outputs = model(**inputs )
    print("Logits:" , outputs.class_queries_logits[0, :3, :3] )
    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.63_53, -4.47_70, -2.60_65], [0.50_81, -4.23_94, -3.53_43], [2.19_09, -5.03_53, -1.93_23]] )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , expected_logits , atol=1e-4 )
    print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        print(F"Saving model and image processor to {pytorch_dump_folder_path}" )
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        model.save_pretrained(pytorch_dump_folder_path )
        image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print("Pushing model and image processor to the hub..." )
        model.push_to_hub(F"nielsr/{model_name}" )
        image_processor.push_to_hub(F"nielsr/{model_name}" )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
    parser.add_argument(
        "--model_name",
        default="maskformer-swin-tiny-ade",
        type=str,
        help="Name of the MaskFormer model you'd like to convert.",
    )
parser.add_argument(
"--checkpoint_path",
default="/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl",
type=str,
help="Path to the original state dict (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
__UpperCAmelCase = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
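# Typical invocation of this conversion script (the script filename and the
# local checkpoint path below are illustrative):
#
#   python convert_maskformer_checkpoint.py \
#       --model_name maskformer-swin-tiny-ade \
#       --checkpoint_path /path/to/MaskFormer-Swin-tiny-ADE20k/model.pkl \
#       --pytorch_dump_folder_path ./maskformer-swin-tiny-ade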
| 308
|
class Graph:
    '''simple docstring'''
    def __init__(self ):
        self.vertex = {}
    def print_graph(self ):
        print(self.vertex )
        for i in self.vertex:
            print(i , ''' -> ''' , ''' -> '''.join([str(j ) for j in self.vertex[i]] ) )
    def add_edge(self , from_vertex: int , to_vertex: int ):
        # check if vertex is already present,
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex )
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]
    def dfs(self ):
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex )
        # call the recursive helper function
        for i in range(len(self.vertex ) ):
            if not visited[i]:
                self.dfs_recursive(i , visited )
    def dfs_recursive(self , start_vertex: int , visited: list ):
        # mark start vertex as visited
        visited[start_vertex] = True
        print(start_vertex , end=''' ''' )
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i , visited )
if __name__ == "__main__":
    g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("""DFS:""")
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
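# For contrast, the same traversal can be done without recursion by keeping an
# explicit stack (a sketch built on the Graph class above; the visit order may
# differ slightly from the recursive version):
def dfs_iterative(graph: Graph) -> None:
    visited = [False] * len(graph.vertex)
    for start in range(len(graph.vertex)):
        if visited[start]:
            continue
        stack = [start]
        while stack:
            v = stack.pop()
            if visited[v]:
                continue
            visited[v] = True
            print(v, end=" ")
            # push neighbours in reverse so they are popped in insertion order
            stack.extend(reversed(graph.vertex.get(v, [])))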
| 234
| 0
|
from __future__ import annotations
def is_9_pandigital ( n : int ) -> bool:
    digits = str(n )
    return len(digits ) == 9 and set(digits ) == set('''123456789''' )
def solution ( ) -> int | None:
    for base_num in range(9_9_9_9 , 4_9_9_9 , -1 ):
        candidate = 1_0_0_0_0_2 * base_num
        if is_9_pandigital(candidate ):
            return candidate
    for base_num in range(3_3_3 , 9_9 , -1 ):
        candidate = 1_0_0_2_0_0_3 * base_num
        if is_9_pandigital(candidate ):
            return candidate
    return None
if __name__ == "__main__":
print(F"""{solution() = }""")
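# Worked check of the concatenation trick above: for a four-digit base_num the
# concatenation of base_num and 2 * base_num equals 100002 * base_num, e.g.
#
#   assert 9_3_2_7 * 1_0_0_0_0_2 == 9_3_2_7_1_8_6_5_4   # "9327" + "18654"
#   assert is_9_pandigital(9_3_2_7_1_8_6_5_4)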
| 714
|
from collections.abc import Callable
import numpy as np
def heun_method ( ode_func : Callable , y0 : float , x0 : float , x_end : float , step_size : float ):
    n = int(np.ceil((x_end - x0) / step_size ) )
    y = np.zeros((n + 1,) )
    y[0] = y0
    x = x0
    for k in range(n ):
        # predictor step: forward Euler estimate
        y_pred = y[k] + step_size * ode_func(x , y[k] )
        # corrector step: average the slopes at both ends of the interval
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x , y[k] ) + ode_func(x + step_size , y_pred ))
        )
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
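# Example use of the Heun integrator above on y' = y with y(0) = 1; the value
# at x = 1 should approximate e ~= 2.71828 (the method is second-order in the
# step size):
#
#   ys = heun_method(lambda x, y: y, 1.0, 0.0, 1.0, 0.001)
#   print(ys[-1])  # ~2.71828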
| 326
| 0
|
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class __UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
    def _UpperCAmelCase ( self ):
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base" )
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base" )
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text )] )
        expected_output_shape = (1, 1_2, 7_6_8) # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0_101, 0.1_218, -0.0_803, 0.0_801, 0.1_327, 0.0_776, -0.1_215, 0.2_383, 0.3_338, 0.3_106, 0.0_300, 0.0_252]] )
        output = model(input_ids )["last_hidden_state"]
        self.assertEqual(output.shape , expected_output_shape )
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1] , expected_output_values_last_dim , atol=1e-3 ) )
| 113
|
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
_lowerCAmelCase : Optional[Any] =logging.getLogger(__name__)
@dataclass
class __UpperCamelCase :
'''simple docstring'''
__magic_name__ = field(
default=1_2_8 ,metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} ,)
__magic_name__ = field(
default=_a ,metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
__magic_name__ = field(
default=_a ,metadata={
"help": (
"Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
)
} ,)
__magic_name__ = field(
default=_a ,metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} ,)
__magic_name__ = field(
default=_a ,metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} ,)
__magic_name__ = field(
default=_a ,metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of prediction examples to this "
"value if set."
)
} ,)
@dataclass
class __UpperCamelCase :
'''simple docstring'''
__magic_name__ = field(
default=_a ,metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
__magic_name__ = field(
default=_a ,metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."} )
__magic_name__ = field(
default=_a ,metadata={"help": "Train language if it is different from the evaluation language."} )
__magic_name__ = field(
default=_a ,metadata={"help": "Pretrained config name or path if not the same as model_name"} )
__magic_name__ = field(
default=_a ,metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
__magic_name__ = field(
default=_a ,metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} ,)
__magic_name__ = field(
default=_a ,metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"} ,)
__magic_name__ = field(
default=_a ,metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} ,)
__magic_name__ = field(
default="main" ,metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} ,)
__magic_name__ = field(
default=_a ,metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} ,)
__magic_name__ = field(
default=_a ,metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} ,)
def main ( ):
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_xnli" , model_args , data_args )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" ,datefmt="%m/%d/%Y %H:%M:%S" ,handlers=[logging.StreamHandler(sys.stdout )] ,)
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    datasets.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(f"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
if training_args.do_train:
if model_args.train_language is None:
UpperCAmelCase__: Optional[int] = load_dataset(
"xnli" ,model_args.language ,split="train" ,cache_dir=model_args.cache_dir ,use_auth_token=True if model_args.use_auth_token else None ,)
else:
UpperCAmelCase__: Optional[Any] = load_dataset(
"xnli" ,model_args.train_language ,split="train" ,cache_dir=model_args.cache_dir ,use_auth_token=True if model_args.use_auth_token else None ,)
UpperCAmelCase__: List[Any] = train_dataset.features["label"].names
if training_args.do_eval:
UpperCAmelCase__: List[Any] = load_dataset(
"xnli" ,model_args.language ,split="validation" ,cache_dir=model_args.cache_dir ,use_auth_token=True if model_args.use_auth_token else None ,)
UpperCAmelCase__: Tuple = eval_dataset.features["label"].names
if training_args.do_predict:
UpperCAmelCase__: Optional[Any] = load_dataset(
"xnli" ,model_args.language ,split="test" ,cache_dir=model_args.cache_dir ,use_auth_token=True if model_args.use_auth_token else None ,)
UpperCAmelCase__: List[Any] = predict_dataset.features["label"].names
# Labels
UpperCAmelCase__: Union[str, Any] = len(SCREAMING_SNAKE_CASE )
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCAmelCase__: Optional[int] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path ,num_labels=SCREAMING_SNAKE_CASE ,idalabel={str(SCREAMING_SNAKE_CASE ): label for i, label in enumerate(SCREAMING_SNAKE_CASE )} ,labelaid={label: i for i, label in enumerate(SCREAMING_SNAKE_CASE )} ,finetuning_task="xnli" ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
UpperCAmelCase__: Dict = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,do_lower_case=model_args.do_lower_case ,cache_dir=model_args.cache_dir ,use_fast=model_args.use_fast_tokenizer ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
UpperCAmelCase__: Optional[Any] = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path ,from_tf=bool(".ckpt" in model_args.model_name_or_path ) ,config=SCREAMING_SNAKE_CASE ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,ignore_mismatched_sizes=model_args.ignore_mismatched_sizes ,)
# Preprocessing the datasets
# Padding strategy
if data_args.pad_to_max_length:
UpperCAmelCase__: Union[str, Any] = "max_length"
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
UpperCAmelCase__: Union[str, Any] = False
def preprocess_function(SCREAMING_SNAKE_CASE ):
# Tokenize the texts
return tokenizer(
examples["premise"] ,examples["hypothesis"] ,padding=SCREAMING_SNAKE_CASE ,max_length=data_args.max_seq_length ,truncation=SCREAMING_SNAKE_CASE ,)
if training_args.do_train:
if data_args.max_train_samples is not None:
UpperCAmelCase__: Optional[Any] = min(len(SCREAMING_SNAKE_CASE ) ,data_args.max_train_samples )
UpperCAmelCase__: Optional[Any] = train_dataset.select(range(SCREAMING_SNAKE_CASE ) )
with training_args.main_process_first(desc="train dataset map pre-processing" ):
UpperCAmelCase__: Dict = train_dataset.map(
SCREAMING_SNAKE_CASE ,batched=SCREAMING_SNAKE_CASE ,load_from_cache_file=not data_args.overwrite_cache ,desc="Running tokenizer on train dataset" ,)
# Log a few random samples from the training set:
for index in random.sample(range(len(SCREAMING_SNAKE_CASE ) ) ,3 ):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}." )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
UpperCAmelCase__: Optional[int] = min(len(SCREAMING_SNAKE_CASE ) ,data_args.max_eval_samples )
UpperCAmelCase__: List[Any] = eval_dataset.select(range(SCREAMING_SNAKE_CASE ) )
with training_args.main_process_first(desc="validation dataset map pre-processing" ):
UpperCAmelCase__: Optional[Any] = eval_dataset.map(
SCREAMING_SNAKE_CASE ,batched=SCREAMING_SNAKE_CASE ,load_from_cache_file=not data_args.overwrite_cache ,desc="Running tokenizer on validation dataset" ,)
if training_args.do_predict:
if data_args.max_predict_samples is not None:
UpperCAmelCase__: Dict = min(len(SCREAMING_SNAKE_CASE ) ,data_args.max_predict_samples )
UpperCAmelCase__: Optional[Any] = predict_dataset.select(range(SCREAMING_SNAKE_CASE ) )
with training_args.main_process_first(desc="prediction dataset map pre-processing" ):
UpperCAmelCase__: int = predict_dataset.map(
SCREAMING_SNAKE_CASE ,batched=SCREAMING_SNAKE_CASE ,load_from_cache_file=not data_args.overwrite_cache ,desc="Running tokenizer on prediction dataset" ,)
# Get the metric function
UpperCAmelCase__: str = evaluate.load("xnli" )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(SCREAMING_SNAKE_CASE ):
UpperCAmelCase__: str = p.predictions[0] if isinstance(p.predictions ,SCREAMING_SNAKE_CASE ) else p.predictions
UpperCAmelCase__: Union[str, Any] = np.argmax(SCREAMING_SNAKE_CASE ,axis=1 )
return metric.compute(predictions=SCREAMING_SNAKE_CASE ,references=p.label_ids )
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
UpperCAmelCase__: Any = default_data_collator
elif training_args.fpaa:
UpperCAmelCase__: Any = DataCollatorWithPadding(SCREAMING_SNAKE_CASE ,pad_to_multiple_of=8 )
else:
UpperCAmelCase__: Tuple = None
# Initialize our Trainer
UpperCAmelCase__: Dict = Trainer(
model=SCREAMING_SNAKE_CASE ,args=SCREAMING_SNAKE_CASE ,train_dataset=train_dataset if training_args.do_train else None ,eval_dataset=eval_dataset if training_args.do_eval else None ,compute_metrics=SCREAMING_SNAKE_CASE ,tokenizer=SCREAMING_SNAKE_CASE ,data_collator=SCREAMING_SNAKE_CASE ,)
# Training
if training_args.do_train:
UpperCAmelCase__: Any = None
if training_args.resume_from_checkpoint is not None:
UpperCAmelCase__: str = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCAmelCase__: Any = last_checkpoint
UpperCAmelCase__: Dict = trainer.train(resume_from_checkpoint=SCREAMING_SNAKE_CASE )
UpperCAmelCase__: List[Any] = train_result.metrics
UpperCAmelCase__: Union[str, Any] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(SCREAMING_SNAKE_CASE )
)
UpperCAmelCase__: Tuple = min(SCREAMING_SNAKE_CASE ,len(SCREAMING_SNAKE_CASE ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("train" ,SCREAMING_SNAKE_CASE )
trainer.save_metrics("train" ,SCREAMING_SNAKE_CASE )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
UpperCAmelCase__: Any = trainer.evaluate(eval_dataset=SCREAMING_SNAKE_CASE )
UpperCAmelCase__: Optional[int] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(SCREAMING_SNAKE_CASE )
UpperCAmelCase__: int = min(SCREAMING_SNAKE_CASE ,len(SCREAMING_SNAKE_CASE ) )
trainer.log_metrics("eval" ,SCREAMING_SNAKE_CASE )
trainer.save_metrics("eval" ,SCREAMING_SNAKE_CASE )
# Prediction
if training_args.do_predict:
logger.info("*** Predict ***" )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__: Union[str, Any] = trainer.predict(SCREAMING_SNAKE_CASE ,metric_key_prefix="predict" )
UpperCAmelCase__: Optional[Any] = (
data_args.max_predict_samples if data_args.max_predict_samples is not None else len(SCREAMING_SNAKE_CASE )
)
UpperCAmelCase__: Union[str, Any] = min(SCREAMING_SNAKE_CASE ,len(SCREAMING_SNAKE_CASE ) )
trainer.log_metrics("predict" ,SCREAMING_SNAKE_CASE )
trainer.save_metrics("predict" ,SCREAMING_SNAKE_CASE )
UpperCAmelCase__: Any = np.argmax(SCREAMING_SNAKE_CASE ,axis=1 )
UpperCAmelCase__: Optional[int] = os.path.join(training_args.output_dir ,"predictions.txt" )
if trainer.is_world_process_zero():
with open(SCREAMING_SNAKE_CASE ,"w" ) as writer:
writer.write("index\tprediction\n" )
for index, item in enumerate(SCREAMING_SNAKE_CASE ):
UpperCAmelCase__: int = label_list[item]
writer.write(f"{index}\t{item}\n" )
if __name__ == "__main__":
main()
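# Illustrative launch of this script (the run_xnli.py name matches the
# telemetry tag above; the flags shown are a minimal subset):
#
#   python run_xnli.py \
#       --model_name_or_path bert-base-multilingual-cased \
#       --language de --train_language en \
#       --do_train --do_eval \
#       --per_device_train_batch_size 32 \
#       --output_dir /tmp/debug_xnli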
| 113
| 1
|
"""simple docstring"""
_snake_case = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 659
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
"tokenization_m2m_100": ["M2M100Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_m2m_100"] = [
"M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
"M2M100ForConditionalGeneration",
"M2M100Model",
"M2M100PreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
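# Conceptually, _LazyModule defers the heavy submodule imports above until an
# attribute is first accessed. A simplified sketch (not the real
# implementation, which also handles dir(), __reduce__, TYPE_CHECKING, etc.):
import importlib
import types

class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                # import the owning submodule on demand, then pull the symbol
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")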
| 659
| 1
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
UpperCamelCase = 16
UpperCamelCase = 32
def get_dataloaders ( accelerator : Accelerator , batch_size : int = 16 ):
    """simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased" )
    datasets = load_dataset("glue" , "mrpc" )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=True , max_length=None )
        return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=["idx", "sentence1", "sentence2"] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label" , "labels" )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding="longest" , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors="pt" , )
# Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
UpperCamelCase = mocked_dataloaders # noqa: F811
def training_function ( config , args ):
    """simple docstring"""
    if os.environ.get("TESTING_MOCKED_DATALOADERS" , None ) == "1":
        config["num_epochs"] = 2
# New Code #
lowerCAmelCase__ = int(args.gradient_accumulation_steps )
lowerCAmelCase__ = int(args.local_sgd_steps )
# Initialize accelerator
lowerCAmelCase__ = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=lowerCAmelCase_ )
if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCAmelCase__ = config["lr"]
lowerCAmelCase__ = int(config["num_epochs"] )
lowerCAmelCase__ = int(config["seed"] )
lowerCAmelCase__ = int(config["batch_size"] )
lowerCAmelCase__ = evaluate.load("glue" , "mrpc" )
set_seed(lowerCAmelCase_ )
lowerCAmelCase__ , lowerCAmelCase__ = get_dataloaders(lowerCAmelCase_ , lowerCAmelCase_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCAmelCase__ = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=lowerCAmelCase_ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCAmelCase__ = model.to(accelerator.device )
# Instantiate optimizer
lowerCAmelCase__ = AdamW(params=model.parameters() , lr=lowerCAmelCase_ )
# Instantiate scheduler
lowerCAmelCase__ = get_linear_schedule_with_warmup(
optimizer=lowerCAmelCase_ , num_warmup_steps=100 , num_training_steps=(len(lowerCAmelCase_ ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = accelerator.prepare(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# Now we train the model
for epoch in range(lowerCAmelCase_ ):
model.train()
with LocalSGD(
accelerator=lowerCAmelCase_ , model=lowerCAmelCase_ , local_sgd_steps=lowerCAmelCase_ , enabled=local_sgd_steps is not None ) as local_sgd:
for step, batch in enumerate(lowerCAmelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(lowerCAmelCase_ ):
lowerCAmelCase__ = model(**lowerCAmelCase_ )
lowerCAmelCase__ = output.loss
accelerator.backward(lowerCAmelCase_ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# LocalSGD-specific line
local_sgd.step()
model.eval()
for step, batch in enumerate(lowerCAmelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCAmelCase__ = model(**lowerCAmelCase_ )
lowerCAmelCase__ = outputs.logits.argmax(dim=-1 )
lowerCAmelCase__ , lowerCAmelCase__ = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=lowerCAmelCase_ , references=lowerCAmelCase_ , )
lowerCAmelCase__ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'epoch {epoch}:' , lowerCAmelCase_ )
def main ( ):
    """simple docstring"""
    parser = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=lowerCAmelCase_ , default=lowerCAmelCase_ , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
# New Code #
parser.add_argument(
"--gradient_accumulation_steps" , type=lowerCAmelCase_ , default=1 , help="The number of minibatches to be ran before gradients are accumulated." , )
parser.add_argument(
"--local_sgd_steps" , type=lowerCAmelCase_ , default=8 , help="Number of local SGD steps or None to disable local SGD" )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
    args = parser.parse_args()
    config = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(lowerCAmelCase_ , lowerCAmelCase_ )
if __name__ == "__main__":
main()
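# Typical launches for this example (the local_sgd.py filename is assumed,
# matching the accelerate examples layout linked in the header comment):
#
#   accelerate launch local_sgd.py --local_sgd_steps 8
#   accelerate launch local_sgd.py --mixed_precision fp16 --gradient_accumulation_steps 2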
| 61
|
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""")) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""")
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue_model_parallelism.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1_6_0_0, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1_6_0_0, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
])
class _snake_case ( unittest.TestCase):
    def setUp( self : Dict ):
        if self.framework == "pytorch":
            subprocess.run(
                F'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split(), encoding="utf-8", check=True, )
        assert hasattr(self, "env" )
    def create_estimator( self : Tuple, instance_count : int ):
        # configuration for running training on smdistributed Model Parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }
        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}
        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script, source_dir=self.env.test_path, role=self.env.role, image_uri=self.env.image_uri, base_job_name=F'''{self.env.base_job_name}-{instance_count}-smp-{name_extension}''', instance_count=instance_count, instance_type=self.instance_type, debugger_hook_config=False, hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            }, metric_definitions=self.env.metric_definitions, distribution=distribution, py_version="py36", )
def A__ ( self : Union[str, Any], __lowercase : List[Any] ):
TrainingJobAnalytics(__lowercase ).export_csv(F'''{self.env.test_path}/{job_name}_metrics.csv''' )
@parameterized.expand([(1,)] )
def A__ ( self : Dict, __lowercase : Optional[int] ):
# create estimator
lowercase__ = self.create_estimator(__lowercase )
# run training
estimator.fit()
# result dataframe
lowercase__ = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
lowercase__ = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
lowercase__ = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
        # get train time from the SageMaker job; this includes starting, preprocessing and stopping time
lowercase__ = (
Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds", 99_9999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
assert all(t <= self.results["eval_loss"] for t in eval_loss )
        # dump test results into a JSON file to share in the PR
with open(F'''{estimator.latest_training_job.name}.json''', "w" ) as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, __lowercase )
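# Running this suite for real needs AWS credentials plus the sagemaker extras;
# the release flow is roughly (a sketch, the command is an assumption):
#   TEST_SAGEMAKER=True python -m pytest -s tests/sagemaker
# Each job then leaves a `<job_name>.json` like the one dumped above, e.g.:
#   import json
#   with open("roberta-large-1-smp-trainer.json") as f:  # hypothetical job name
#       results = json.load(f)
#   print(results["train_time"], results["eval_accuracy"])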
| 413
| 0
|
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( snake_case : int )-> List[str]:
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def SCREAMING_SNAKE_CASE_ ( snake_case : dict[int, list[int]] )-> list[tuple[int, int]]:
    id_ = 0
    n = len(snake_case )  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at : int , parent : int , bridges : list[tuple[int, int]] , id_ : int ):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in snake_case[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to , at , bridges , id_ )
                low[at] = min(low[at] , low[to] )
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at) )
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at] , low[to] )

    bridges: list[tuple[int, int]] = []
    for i in range(n ):
        if not visited[i]:
            dfs(i , -1 , bridges , id_ )
    return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
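# A minimal usage sketch. Note that both functions above share the obfuscated
# name SCREAMING_SNAKE_CASE_, so the later definition -- the bridge finder --
# is the one resolved at module level; the graph below is supplied inline.
if __name__ == "__main__":
    _demo_graph = {0: [1], 1: [0, 2, 3], 2: [1, 3], 3: [1, 2]}
    # edge (0, 1) is the only bridge; vertices 1, 2 and 3 form a cycle
    assert SCREAMING_SNAKE_CASE_(_demo_graph ) == [(0, 1)]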
| 709
|
"""simple docstring"""
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
A_ : Optional[int] =[
# tf -> hf
("""/""", """."""),
("""layer_""", """layers."""),
("""kernel""", """weight"""),
("""beta""", """bias"""),
("""gamma""", """weight"""),
("""pegasus""", """model"""),
]
A_ : List[Any] =[
(""".output.dense""", """.fc2"""),
("""intermediate.LayerNorm""", """final_layer_norm"""),
("""intermediate.dense""", """fc1"""),
]
A_ : Union[str, Any] =(
INIT_COMMON
+ [
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.out_proj"""),
("""attention.self""", """self_attn"""),
("""attention.encdec.LayerNorm""", """encoder_attn_layer_norm"""),
("""attention.encdec_output.dense""", """encoder_attn.out_proj"""),
("""attention.encdec""", """encoder_attn"""),
("""key""", """k_proj"""),
("""value""", """v_proj"""),
("""query""", """q_proj"""),
("""decoder.LayerNorm""", """decoder.layernorm_embedding"""),
]
+ END_COMMON
)
A_ : int =(
INIT_COMMON
+ [
("""embeddings.word_embeddings""", """shared.weight"""),
("""embeddings.position_embeddings""", """embed_positions.weight"""),
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.output"""),
("""attention.self""", """self_attn.self"""),
("""encoder.LayerNorm""", """encoder.layernorm_embedding"""),
]
+ END_COMMON
)
A_ : Dict =[
"""encdec/key/bias""",
"""encdec/query/bias""",
"""encdec/value/bias""",
"""self/key/bias""",
"""self/query/bias""",
"""self/value/bias""",
"""encdec_output/dense/bias""",
"""attention/output/dense/bias""",
]
def SCREAMING_SNAKE_CASE_ ( k : str , patterns : list )-> str:
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name , hf_name )
    return k
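# A quick illustration of the renaming helper above (a sketch; the input key is
# an assumed example of the TF naming scheme, and the pattern list is the one
# defined above -- DECODER_PATTERNS in the original, pre-obfuscation source):
#   SCREAMING_SNAKE_CASE_("pegasus/decoder/layer_0/attention/self/query/kernel", DECODER_PATTERNS)
#   # -> "model.decoder.layers.0.self_attn.q_proj.weight"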
def SCREAMING_SNAKE_CASE_ ( snake_case : dict , snake_case : dict )-> BigBirdPegasusForConditionalGeneration:
_lowerCamelCase = BigBirdPegasusConfig(**snake_case )
_lowerCamelCase = BigBirdPegasusForConditionalGeneration(snake_case )
_lowerCamelCase = torch_model.state_dict()
_lowerCamelCase = {}
# separating decoder weights
_lowerCamelCase = {k: tf_weights[k] for k in tf_weights if k.startswith('pegasus/decoder' )}
_lowerCamelCase = {k: tf_weights[k] for k in tf_weights if not k.startswith('pegasus/decoder' )}
for k, v in tqdm(decoder_weights.items() , 'tf -> hf conversion' ):
        _lowerCamelCase = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
        if any(_lowerCamelCase ):
            continue
_lowerCamelCase = DECODER_PATTERNS
_lowerCamelCase = rename_state_dict_key(snake_case , snake_case )
if new_k not in state_dict:
raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})' )
        if any(i in k for i in ['dense', 'query', 'key', 'value'] ):
_lowerCamelCase = v.T
_lowerCamelCase = torch.from_numpy(snake_case )
assert v.shape == state_dict[new_k].shape, f'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
for k, v in tqdm(remaining_weights.items() , 'tf -> hf conversion' ):
        _lowerCamelCase = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
        if any(_lowerCamelCase ):
            continue
_lowerCamelCase = REMAINING_PATTERNS
_lowerCamelCase = rename_state_dict_key(snake_case , snake_case )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})' )
        if any(i in k for i in ['dense', 'query', 'key', 'value'] ):
_lowerCamelCase = v.T
_lowerCamelCase = torch.from_numpy(snake_case )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, f'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
_lowerCamelCase = mapping['model.embed_positions.weight']
_lowerCamelCase = mapping.pop('model.embed_positions.weight' )
_lowerCamelCase , _lowerCamelCase = torch_model.load_state_dict(snake_case , strict=snake_case )
_lowerCamelCase = [
k
for k in missing
if k
not in [
'final_logits_bias',
'model.encoder.embed_tokens.weight',
'model.decoder.embed_tokens.weight',
'lm_head.weight',
]
]
assert unexpected_missing == [], f'no matches found for the following torch keys {unexpected_missing}'
assert extra == [], f'no matches found for the following tf keys {extra}'
return torch_model
def SCREAMING_SNAKE_CASE_ ( snake_case : Optional[int] )-> Dict:
_lowerCamelCase = tf.train.list_variables(snake_case )
_lowerCamelCase = {}
_lowerCamelCase = ['global_step']
for name, shape in tqdm(snake_case , desc='converting tf checkpoint to dict' ):
_lowerCamelCase = any(pat in name for pat in ignore_name )
if skip_key:
continue
_lowerCamelCase = tf.train.load_variable(snake_case , snake_case )
_lowerCamelCase = array
return tf_weights
def SCREAMING_SNAKE_CASE_ ( snake_case : str , snake_case : str , snake_case : dict )-> List[str]:
_lowerCamelCase = get_tf_weights_as_numpy(snake_case )
_lowerCamelCase = convert_bigbird_pegasus(snake_case , snake_case )
torch_model.save_pretrained(snake_case )
if __name__ == "__main__":
A_ : Union[str, Any] =argparse.ArgumentParser()
parser.add_argument("""--tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""")
parser.add_argument("""--save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""")
A_ : Union[str, Any] =parser.parse_args()
A_ : List[str] ={}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
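# Example invocation (a sketch; the script name and paths are placeholders):
#   python convert_bigbird_pegasus_tf_checkpoint_to_pytorch.py \
#       --tf_ckpt_path /path/to/bigbird-pegasus/tf_ckpt --save_dir ./bigbird-pegasus-hf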
| 222
| 0
|
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Union[str, Any] = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"decoder.output_projection.weight",
"_float_tensor",
"encoder.embed_positions._float_tensor",
"decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
state_dict.pop(_UpperCamelCase , _UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ , lowercase_ : Any = emb.weight.shape
lowercase_ : int = nn.Linear(_UpperCamelCase , _UpperCamelCase , bias=_UpperCamelCase )
lowercase_ : List[str] = emb.weight.data
return lin_layer
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Tuple = torch.load(_UpperCamelCase , map_location="cpu" )
lowercase_ : Optional[int] = mam_aaa["args"] or mam_aaa["cfg"]["model"]
lowercase_ : int = mam_aaa["model"]
remove_ignore_keys_(_UpperCamelCase )
lowercase_ : int = state_dict["encoder.embed_tokens.weight"].shape[0]
lowercase_ : Optional[Any] = MaMaaaConfig(
vocab_size=_UpperCamelCase , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="relu" , )
lowercase_ : Optional[int] = state_dict["decoder.embed_tokens.weight"]
lowercase_ : List[str] = MaMaaaForConditionalGeneration(_UpperCamelCase )
model.model.load_state_dict(_UpperCamelCase , strict=_UpperCamelCase )
lowercase_ : List[str] = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
UpperCamelCase__ = parser.parse_args()
UpperCamelCase__ = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
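# Example invocation (a sketch; the script name and paths are placeholders):
#   python convert_m2m100_original_checkpoint_to_pytorch.py \
#       /path/to/m2m100/model.pt ./m2m100-hf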
| 620
|
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self : Union[str, Any] , snake_case__ : int , snake_case__ : List[str]=2 , snake_case__ : List[str]=3 , snake_case__ : Tuple=4 , snake_case__ : Optional[Any]=2 , snake_case__ : int=7 , snake_case__ : Optional[Any]=True , snake_case__ : Union[str, Any]=True , snake_case__ : Tuple=True , snake_case__ : int=True , snake_case__ : Optional[Any]=9_9 , snake_case__ : Optional[Any]=3_6 , snake_case__ : Tuple=3 , snake_case__ : int=4 , snake_case__ : Tuple=3_7 , snake_case__ : Union[str, Any]="gelu" , snake_case__ : Any=0.1 , snake_case__ : Optional[int]=0.1 , snake_case__ : Dict=5_1_2 , snake_case__ : Optional[int]=1_6 , snake_case__ : Dict=2 , snake_case__ : Dict=0.02 , snake_case__ : List[Any]=6 , snake_case__ : int=6 , snake_case__ : Tuple=3 , snake_case__ : List[Any]=4 , snake_case__ : List[str]=None , snake_case__ : Optional[Any]=1_0_0_0 , ) -> Optional[int]:
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = num_channels
_lowerCamelCase = image_size
_lowerCamelCase = patch_size
_lowerCamelCase = text_seq_length
_lowerCamelCase = is_training
_lowerCamelCase = use_input_mask
_lowerCamelCase = use_token_type_ids
_lowerCamelCase = use_labels
_lowerCamelCase = vocab_size
_lowerCamelCase = hidden_size
_lowerCamelCase = num_hidden_layers
_lowerCamelCase = num_attention_heads
_lowerCamelCase = intermediate_size
_lowerCamelCase = hidden_act
_lowerCamelCase = hidden_dropout_prob
_lowerCamelCase = attention_probs_dropout_prob
_lowerCamelCase = max_position_embeddings
_lowerCamelCase = type_vocab_size
_lowerCamelCase = type_sequence_label_size
_lowerCamelCase = initializer_range
_lowerCamelCase = coordinate_size
_lowerCamelCase = shape_size
_lowerCamelCase = num_labels
_lowerCamelCase = num_choices
_lowerCamelCase = scope
_lowerCamelCase = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
_lowerCamelCase = text_seq_length
_lowerCamelCase = (image_size // patch_size) ** 2 + 1
_lowerCamelCase = self.text_seq_length + self.image_seq_length
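        # worked example (illustrative numbers, not the defaults above): with
        # image_size=224 and patch_size=16, (224 // 16) ** 2 + 1 = 197 visual
        # tokens, so seq_length = text_seq_length + 197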
def _snake_case ( self : int ) -> Any:
_lowerCamelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
_lowerCamelCase = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_lowerCamelCase = bbox[i, j, 3]
_lowerCamelCase = bbox[i, j, 1]
_lowerCamelCase = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_lowerCamelCase = bbox[i, j, 2]
_lowerCamelCase = bbox[i, j, 0]
_lowerCamelCase = t
_lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase = None
if self.use_input_mask:
_lowerCamelCase = random_attention_mask([self.batch_size, self.text_seq_length] )
_lowerCamelCase = None
if self.use_token_type_ids:
_lowerCamelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
_lowerCamelCase = None
_lowerCamelCase = None
if self.use_labels:
_lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCamelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
_lowerCamelCase = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def _snake_case ( self : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : Any , snake_case__ : str , snake_case__ : Tuple , snake_case__ : Dict , snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : Optional[Any] ) -> Any:
_lowerCamelCase = LayoutLMvaModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
# text + image
_lowerCamelCase = model(snake_case__ , pixel_values=snake_case__ )
_lowerCamelCase = model(
snake_case__ , bbox=snake_case__ , pixel_values=snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ )
_lowerCamelCase = model(snake_case__ , bbox=snake_case__ , pixel_values=snake_case__ , token_type_ids=snake_case__ )
_lowerCamelCase = model(snake_case__ , bbox=snake_case__ , pixel_values=snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
_lowerCamelCase = model(snake_case__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
_lowerCamelCase = model(pixel_values=snake_case__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def _snake_case ( self : Any , snake_case__ : Tuple , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : List[Any] , snake_case__ : Any , snake_case__ : str , snake_case__ : str , snake_case__ : int ) -> List[Any]:
_lowerCamelCase = self.num_labels
_lowerCamelCase = LayoutLMvaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
_lowerCamelCase = model(
snake_case__ , bbox=snake_case__ , pixel_values=snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self : Dict , snake_case__ : Tuple , snake_case__ : Optional[Any] , snake_case__ : str , snake_case__ : str , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : Dict ) -> Optional[Any]:
_lowerCamelCase = self.num_labels
_lowerCamelCase = LayoutLMvaForTokenClassification(config=snake_case__ )
model.to(snake_case__ )
model.eval()
_lowerCamelCase = model(
snake_case__ , bbox=snake_case__ , pixel_values=snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def _snake_case ( self : str , snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : str , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : List[Any] , snake_case__ : int , snake_case__ : Any ) -> Dict:
_lowerCamelCase = LayoutLMvaForQuestionAnswering(config=snake_case__ )
model.to(snake_case__ )
model.eval()
_lowerCamelCase = model(
snake_case__ , bbox=snake_case__ , pixel_values=snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _snake_case ( self : str ) -> List[str]:
_lowerCamelCase = self.prepare_config_and_inputs()
(
(
_lowerCamelCase
) , (
_lowerCamelCase
) , (
_lowerCamelCase
) , (
_lowerCamelCase
) , (
_lowerCamelCase
) , (
_lowerCamelCase
) , (
_lowerCamelCase
) , (
_lowerCamelCase
) ,
) = config_and_inputs
_lowerCamelCase = {
'input_ids': input_ids,
'bbox': bbox,
'pixel_values': pixel_values,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCAmelCase_ = (
{'document-question-answering': LayoutLMvaForQuestionAnswering, 'feature-extraction': LayoutLMvaModel}
if is_torch_available()
else {}
)
def _snake_case ( self : Union[str, Any] , snake_case__ : Tuple , snake_case__ : Optional[Any] , snake_case__ : int , snake_case__ : Tuple , snake_case__ : Union[str, Any] ) -> str:
# `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
# embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
# the sequence dimension of the text embedding only.
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
return True
def _snake_case ( self : str ) -> Any:
_lowerCamelCase = LayoutLMvaModelTester(self )
_lowerCamelCase = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 )
def _snake_case ( self : List[str] , snake_case__ : str , snake_case__ : List[Any] , snake_case__ : Dict=False ) -> str:
_lowerCamelCase = copy.deepcopy(snake_case__ )
if model_class in get_values(snake_case__ ):
_lowerCamelCase = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(snake_case__ , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(snake_case__ ):
_lowerCamelCase = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=snake_case__ )
elif model_class in get_values(snake_case__ ):
_lowerCamelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case__ )
_lowerCamelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case__ )
elif model_class in [
*get_values(snake_case__ ),
]:
_lowerCamelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case__ )
elif model_class in [
*get_values(snake_case__ ),
]:
_lowerCamelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=snake_case__ , )
return inputs_dict
def _snake_case ( self : Any ) -> List[str]:
self.config_tester.run_common_tests()
def _snake_case ( self : int ) -> Optional[int]:
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def _snake_case ( self : int ) -> Any:
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_lowerCamelCase = type
self.model_tester.create_and_check_model(*snake_case__ )
def _snake_case ( self : int ) -> Any:
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case__ )
def _snake_case ( self : Any ) -> Any:
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case__ )
def _snake_case ( self : int ) -> List[Any]:
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case__ )
@slow
def _snake_case ( self : Optional[int] ) -> List[Any]:
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase = LayoutLMvaModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def lowerCamelCase ( ) -> List[Any]:
_lowerCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _snake_case ( self : int ) -> Any:
return LayoutLMvaImageProcessor(apply_ocr=snake_case__ ) if is_vision_available() else None
@slow
def _snake_case ( self : List[str] ) -> Tuple:
_lowerCamelCase = LayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' ).to(snake_case__ )
_lowerCamelCase = self.default_image_processor
_lowerCamelCase = prepare_img()
_lowerCamelCase = image_processor(images=snake_case__ , return_tensors='pt' ).pixel_values.to(snake_case__ )
_lowerCamelCase = torch.tensor([[1, 2]] )
_lowerCamelCase = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
_lowerCamelCase = model(
input_ids=input_ids.to(snake_case__ ) , bbox=bbox.to(snake_case__ ) , pixel_values=pixel_values.to(snake_case__ ) , )
# verify the logits
_lowerCamelCase = torch.Size((1, 1_9_9, 7_6_8) )
self.assertEqual(outputs.last_hidden_state.shape , snake_case__ )
_lowerCamelCase = torch.tensor(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(snake_case__ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , snake_case__ , atol=1e-4 ) )
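# End-to-end sketch (an assumption about typical LayoutLMv3 usage; with OCR
# enabled the processor builds input_ids and bbox for you instead of the
# hand-crafted tensors used in the test above):
#   from transformers import AutoProcessor
#   processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=True)
#   encoding = processor(image, return_tensors="pt")  # input_ids, bbox, pixel_values, ...
#   outputs = model(**encoding)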
| 544
| 0
|
"""simple docstring"""
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
_lowercase : Optional[int] = 6_378_137.0
_lowercase : Any = 6_356_752.314_245
_lowercase : Any = 6_3_7_8_1_3_7
def snake_case__ ( lat1 : float , long1 : float , lat2 : float , long2 : float ):
    """simple docstring"""
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1 ) ) )
    b_lat2 = atan((1 - flattening) * tan(radians(lat2 ) ) )
    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1 , long1 , lat2 , long2 ) / EQUATORIAL_RADIUS
    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2
    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2(P) * cos^2(Q) / cos^2(sigma / 2)
    x_numerator = (sin(p_value ) ** 2) * (cos(q_value ) ** 2)
    x_denominator = cos(sigma / 2 ) ** 2
    x_value = (sigma - sin(sigma )) * (x_numerator / x_denominator)
    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2(P) * sin^2(Q) / sin^2(sigma / 2)
    y_numerator = (cos(p_value ) ** 2) * (sin(q_value ) ** 2)
    y_denominator = sin(sigma / 2 ) ** 2
    y_value = (sigma + sin(sigma )) * (y_numerator / y_denominator)
    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
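# Usage sketch (coordinates are illustrative; Berlin -> Paris comes out on the
# order of 8.8e5 metres):
#   snake_case__(52.52, 13.405, 48.8566, 2.3522)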
| 625
|
"""simple docstring"""
_lowercase : Optional[Any] = {
"Pillow": "Pillow<10.0.0",
"accelerate": "accelerate>=0.20.3",
"av": "av==9.2.0",
"beautifulsoup4": "beautifulsoup4",
"black": "black~=23.1",
"codecarbon": "codecarbon==1.2.0",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"decord": "decord==0.6.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"fairscale": "fairscale>0.3",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
"jaxlib": "jaxlib>=0.1.65,<=0.4.13",
"jieba": "jieba",
"kenlm": "kenlm",
"keras-nlp": "keras-nlp>=0.3.1",
"librosa": "librosa",
"nltk": "nltk",
"natten": "natten>=0.14.6",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic<2",
"pytest": "pytest>=7.2.0",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ray[tune]": "ray[tune]",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff>=0.0.241,<=0.0.259",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.3.1",
"sagemaker": "sagemaker>=2.31.0",
"scikit-learn": "scikit-learn",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
"tensorflow": "tensorflow>=2.6,<2.14",
"tensorflow-text": "tensorflow-text<2.14",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"timm": "timm",
"tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
"torch": "torch>=1.9,!=1.12.0",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
}
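# A sketch of how such a pin table is typically consumed elsewhere in the
# library (an assumption based on transformers' dependency version checking):
#   from transformers.utils.versions import require_version
#   require_version(_lowercase["tokenizers"])  # raises if the installed version mismatches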
| 625
| 1
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
_a : Any = logging.get_logger(__name__)
class _lowercase ( YolosImageProcessor ):
    def __init__( self : List[Any] , *args : Dict , **kwargs : List[str] ) -> None:
        warnings.warn(
            'The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use YolosImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
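# Migration sketch (an assumption about typical replacement usage):
#   from transformers import YolosImageProcessor
#   image_processor = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
#   inputs = image_processor(images=image, return_tensors="pt")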
| 56
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = KandinskyInpaintPipeline
UpperCAmelCase__ : Optional[int] = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
UpperCAmelCase__ : Optional[Any] = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
"mask_image",
]
UpperCAmelCase__ : Optional[int] = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
UpperCAmelCase__ : Any = False
@property
def __lowercase ( self ) -> Optional[int]:
return 3_2
@property
def __lowercase ( self ) -> int:
return 3_2
@property
def __lowercase ( self ) -> List[str]:
return self.time_input_dim
@property
def __lowercase ( self ) -> List[str]:
return self.time_input_dim * 4
@property
def __lowercase ( self ) -> Optional[Any]:
return 1_0_0
@property
def __lowercase ( self ) -> Optional[Any]:
_a : Any = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
return tokenizer
@property
def __lowercase ( self ) -> str:
torch.manual_seed(0 )
_a : List[Any] = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_0_0_5 , )
_a : Optional[int] = MultilingualCLIP(_a )
_a : Tuple = text_encoder.eval()
return text_encoder
@property
def __lowercase ( self ) -> str:
torch.manual_seed(0 )
_a : List[str] = {
'''in_channels''': 9,
            # Out channels is double the in channels because the model predicts both mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
_a : Dict = UNetaDConditionModel(**_a )
return model
@property
def __lowercase ( self ) -> Optional[int]:
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __lowercase ( self ) -> Tuple:
torch.manual_seed(0 )
_a : List[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def __lowercase ( self ) -> Any:
_a : List[Any] = self.dummy_text_encoder
_a : Optional[Any] = self.dummy_tokenizer
_a : Optional[Any] = self.dummy_unet
_a : Union[str, Any] = self.dummy_movq
_a : Tuple = DDIMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='''linear''' , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=_a , set_alpha_to_one=_a , steps_offset=1 , prediction_type='''epsilon''' , thresholding=_a , )
_a : str = {
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __lowercase ( self , _a , _a=0 ) -> int:
_a : Union[str, Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_a ) ).to(_a )
_a : List[str] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_a )
# create init_image
_a : Tuple = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(_a ) ).to(_a )
_a : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_a : Optional[int] = Image.fromarray(np.uinta(_a ) ).convert('''RGB''' ).resize((2_5_6, 2_5_6) )
# create mask
_a : Union[str, Any] = np.ones((6_4, 6_4) , dtype=np.floataa )
_a : List[str] = 0
if str(_a ).startswith('''mps''' ):
_a : Tuple = torch.manual_seed(_a )
else:
_a : Any = torch.Generator(device=_a ).manual_seed(_a )
_a : Any = {
'''prompt''': '''horse''',
'''image''': init_image,
'''mask_image''': mask,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 6_4,
'''width''': 6_4,
'''num_inference_steps''': 2,
'''guidance_scale''': 4.0,
'''output_type''': '''np''',
}
return inputs
def __lowercase ( self ) -> Optional[Any]:
_a : Optional[Any] = '''cpu'''
_a : List[Any] = self.get_dummy_components()
_a : Tuple = self.pipeline_class(**_a )
_a : int = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_a : Any = pipe(**self.get_dummy_inputs(_a ) )
_a : str = output.images
_a : Tuple = pipe(
**self.get_dummy_inputs(_a ) , return_dict=_a , )[0]
_a : Union[str, Any] = image[0, -3:, -3:, -1]
_a : Tuple = image_from_tuple[0, -3:, -3:, -1]
print(F"""image.shape {image.shape}""" )
assert image.shape == (1, 6_4, 6_4, 3)
_a : str = np.array(
[0.832_6919, 0.7379_0467, 0.2091_8581, 0.930_9612, 0.551_1791, 0.4371_3328, 0.551_3321, 0.4992_2934, 0.5949_7786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
def __lowercase ( self ) -> Dict:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ) -> str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self ) -> Union[str, Any]:
_a : Tuple = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy''' )
_a : str = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
_a : Tuple = np.ones((7_6_8, 7_6_8) , dtype=np.floataa )
_a : Any = 0
_a : Optional[Any] = '''a hat'''
_a : Optional[Any] = KandinskyPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(_a )
_a : Tuple = KandinskyInpaintPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-inpaint''' , torch_dtype=torch.floataa )
_a : Union[str, Any] = pipeline.to(_a )
pipeline.set_progress_bar_config(disable=_a )
_a : Union[str, Any] = torch.Generator(device='''cpu''' ).manual_seed(0 )
_a , _a : Dict = pipe_prior(
_a , generator=_a , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
_a : Optional[int] = pipeline(
_a , image=_a , mask_image=_a , image_embeds=_a , negative_image_embeds=_a , generator=_a , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , output_type='''np''' , )
_a : Optional[int] = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(_a , _a )
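# Inference sketch mirroring the slow test above (an assumption about typical
# Kandinsky usage): the prior pipeline maps the prompt to image embeddings,
# which the inpaint pipeline consumes together with the source image and a
# {0, 1} float mask:
#   image_embeds, negative_image_embeds = pipe_prior(prompt).to_tuple()
#   result = pipeline(prompt, image=init_image, mask_image=mask,
#                     image_embeds=image_embeds,
#                     negative_image_embeds=negative_image_embeds)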
| 14
| 0
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
lowerCAmelCase: int = logging.get_logger(__name__)
class a__( lowerCamelCase__ ):
def __init__( self : Union[str, Any] , __snake_case : int , __snake_case : int , __snake_case : float , **__snake_case : Optional[int] ):
a : Optional[Any] = feature_size
a : Tuple = sampling_rate
a : str = padding_value
a : Any = kwargs.pop('padding_side' , 'right' )
a : Tuple = kwargs.pop('return_attention_mask' , __snake_case )
super().__init__(**__snake_case )
def lowercase_ ( self : Tuple , __snake_case : Union[
BatchFeature,
List[BatchFeature],
Dict[str, BatchFeature],
Dict[str, List[BatchFeature]],
List[Dict[str, BatchFeature]],
] , __snake_case : Union[bool, str, PaddingStrategy] = True , __snake_case : Optional[int] = None , __snake_case : bool = False , __snake_case : Optional[int] = None , __snake_case : Optional[bool] = None , __snake_case : Optional[Union[str, TensorType]] = None , ):
        # If we have a list of dicts, let's convert it to a dict of lists
        # We do this to allow using this method as a collate_fn function in a PyTorch DataLoader
if isinstance(__snake_case , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
a : Union[str, Any] = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
        # The model's main input name, usually `input_values`, has to be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
'You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`'
F""" to this method that includes {self.model_input_names[0]}, but you provided"""
F""" {list(processed_features.keys() )}""" )
a : Tuple = processed_features[self.model_input_names[0]]
a : str = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(__snake_case ) == 0:
if return_attention_mask:
a : Dict = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
a : Optional[int] = required_input[0]
if isinstance(__snake_case , (list, tuple) ):
            # first_element might be an empty list/tuple in some edge cases, so we grab the first non-empty element.
a : Dict = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(__snake_case ):
a : Tuple = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(__snake_case ):
a : Any = 'tf'
elif is_torch_tensor(__snake_case ):
a : Optional[Any] = 'pt'
elif isinstance(__snake_case , (int, float, list, tuple, np.ndarray) ):
a : List[Any] = 'np'
else:
raise ValueError(
F"""type of {first_element} unknown: {type(__snake_case )}. """
'Should be one of a python, numpy, pytorch or tensorflow object.' )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
a : List[Any] = to_numpy(__snake_case )
else:
a : Tuple = [to_numpy(__snake_case ) for v in value]
# Convert padding_strategy in PaddingStrategy
a : Any = self._get_padding_strategies(padding=__snake_case , max_length=__snake_case )
a : int = processed_features[self.model_input_names[0]]
a : Dict = len(__snake_case )
if not all(len(__snake_case ) == batch_size for v in processed_features.values() ):
raise ValueError('Some items in the output dictionary have a different batch size than others.' )
a : Optional[int] = []
for i in range(__snake_case ):
a : int = {k: v[i] for k, v in processed_features.items()}
# truncation
a : List[str] = self._truncate(
__snake_case , max_length=__snake_case , pad_to_multiple_of=__snake_case , truncation=__snake_case , )
truncated_inputs.append(__snake_case )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
a : List[str] = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
a : Tuple = PaddingStrategy.MAX_LENGTH
a : int = {}
for i in range(__snake_case ):
# padding
a : int = self._pad(
truncated_inputs[i] , max_length=__snake_case , padding_strategy=__snake_case , pad_to_multiple_of=__snake_case , return_attention_mask=__snake_case , )
for key, value in outputs.items():
if key not in batch_outputs:
a : int = []
if value.dtype is np.dtype(np.floataa ):
a : List[str] = value.astype(np.floataa )
batch_outputs[key].append(__snake_case )
return BatchFeature(__snake_case , tensor_type=__snake_case )
def lowercase_ ( self : List[str] , __snake_case : Union[Dict[str, np.ndarray], BatchFeature] , __snake_case : Optional[int] = None , __snake_case : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __snake_case : Optional[int] = None , __snake_case : Optional[bool] = None , ):
a : List[str] = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
a : Any = len(__snake_case )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
a : Dict = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
a : List[Any] = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(__snake_case ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
a : Union[str, Any] = np.ones(len(__snake_case ) , dtype=np.intaa )
if needs_to_be_padded:
a : Tuple = max_length - len(__snake_case )
if self.padding_side == "right":
if return_attention_mask:
a : Any = np.pad(
processed_features['attention_mask'] , (0, difference) )
a : Optional[Any] = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
a : Optional[int] = np.pad(
__snake_case , __snake_case , 'constant' , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
a : Optional[int] = np.pad(
processed_features['attention_mask'] , (difference, 0) )
a : Optional[Any] = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
a : List[Any] = np.pad(
__snake_case , __snake_case , 'constant' , constant_values=self.padding_value )
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return processed_features
def lowercase_ ( self : List[str] , __snake_case : Union[Dict[str, np.ndarray], BatchFeature] , __snake_case : Optional[int] = None , __snake_case : Optional[int] = None , __snake_case : Optional[bool] = None , ):
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError('When setting ``truncation=True``, make sure that ``max_length`` is defined.' )
a : Dict = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
a : Optional[Any] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
a : str = len(__snake_case ) > max_length
if needs_to_be_truncated:
a : str = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
a : List[str] = processed_features['attention_mask'][:max_length]
return processed_features
def lowercase_ ( self : Dict , __snake_case : Optional[Any]=False , __snake_case : Optional[int]=None ):
# Get padding strategy
if padding is not False:
if padding is True:
a : Union[str, Any] = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(__snake_case , __snake_case ):
a : List[Any] = PaddingStrategy(__snake_case )
elif isinstance(__snake_case , __snake_case ):
a : List[str] = padding
else:
a : Dict = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
F"""When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined""" )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
'Asking to pad but the feature_extractor does not have a padding value. Please select a value to use'
' as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.' )
return padding_strategy
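# A minimal padding sketch (an assumption about typical usage through a
# concrete subclass such as Wav2Vec2FeatureExtractor, where this entry point
# is called `pad` in the original source):
#   features = [{"input_values": [0.1, 0.2, 0.3]}, {"input_values": [0.4]}]
#   batch = extractor.pad(features, padding=True, return_tensors="np")
#   # the short example is right-padded with extractor.padding_value and, when
#   # enabled, batch["attention_mask"] marks which frames are real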
| 195
|
'''simple docstring'''
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
lowerCAmelCase: List[str] = TypeVar('T')
def lowerCamelCase__ ( _A ):
return (position - 1) // 2
def lowerCamelCase__ ( _A ):
return (2 * position) + 1
def lowerCamelCase__ ( _A ):
return (2 * position) + 2
class a__( Generic[T] ):
def __init__( self : Optional[Any] ):
a : list[tuple[T, int]] = []
a : dict[T, int] = {}
a : int = 0
def __len__( self : Union[str, Any] ):
return self.elements
def __repr__( self : Any ):
return str(self.heap )
def lowercase_ ( self : Optional[int] ):
# Check if the priority queue is empty
return self.elements == 0
def lowercase_ ( self : Union[str, Any] , __snake_case : T , __snake_case : int ):
# Add an element with given priority to the queue
self.heap.append((elem, weight) )
a : int = self.elements
self.elements += 1
self._bubble_up(__snake_case )
def lowercase_ ( self : Union[str, Any] ):
# Remove and return the element with lowest weight (highest priority)
if self.elements > 1:
self._swap_nodes(0 , self.elements - 1 )
a , a : str = self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
a , a : List[Any] = self.heap[0]
self._bubble_down(__snake_case )
return elem
def lowercase_ ( self : List[str] , __snake_case : T , __snake_case : int ):
# Update the weight of the given key
a : Any = self.position_map[elem]
a : Union[str, Any] = (elem, weight)
if position > 0:
a : Optional[Any] = get_parent_position(__snake_case )
a , a : Union[str, Any] = self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(__snake_case )
else:
self._bubble_down(__snake_case )
else:
self._bubble_down(__snake_case )
def lowercase_ ( self : int , __snake_case : T ):
# Place a node at the proper position (upward movement) [to be used internally
# only]
a : Union[str, Any] = self.position_map[elem]
if curr_pos == 0:
return None
a : Union[str, Any] = get_parent_position(__snake_case )
a , a : Tuple = self.heap[curr_pos]
a , a : List[Any] = self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(__snake_case , __snake_case )
return self._bubble_up(__snake_case )
return None
def lowercase_ ( self : Tuple , __snake_case : T ):
# Place a node at the proper position (downward movement) [to be used
# internally only]
a : str = self.position_map[elem]
a , a : List[Any] = self.heap[curr_pos]
a : int = get_child_left_position(__snake_case )
a : Dict = get_child_right_position(__snake_case )
if child_left_position < self.elements and child_right_position < self.elements:
a , a : int = self.heap[child_left_position]
a , a : Optional[Any] = self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(__snake_case , __snake_case )
return self._bubble_down(__snake_case )
if child_left_position < self.elements:
a , a : Dict = self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(__snake_case , __snake_case )
return self._bubble_down(__snake_case )
else:
return None
if child_right_position < self.elements:
a , a : List[Any] = self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(__snake_case , __snake_case )
return self._bubble_down(__snake_case )
return None
def lowercase_ ( self : List[Any] , __snake_case : int , __snake_case : int ):
# Swap the nodes at the given positions
a : Optional[int] = self.heap[nodea_pos][0]
a : int = self.heap[nodea_pos][0]
a , a : List[str] = (
self.heap[nodea_pos],
self.heap[nodea_pos],
)
a : Tuple = nodea_pos
a : Any = nodea_pos
class a__( Generic[T] ):
def __init__( self : Optional[int] ):
a : dict[T, dict[T, int]] = {}
a : int = 0
def __repr__( self : List[str] ):
return str(self.connections )
def __len__( self : str ):
return self.nodes
def lowercase_ ( self : int , __snake_case : T ):
# Add a node in the graph if it is not in the graph
if node not in self.connections:
a : Optional[int] = {}
self.nodes += 1
def lowercase_ ( self : Dict , __snake_case : T , __snake_case : T , __snake_case : int ):
# Add an edge between 2 nodes in the graph
self.add_node(__snake_case )
self.add_node(__snake_case )
a : Any = weight
a : Dict = weight
def lowerCamelCase__ ( _A , ):
a : dict[T, int] = {node: maxsize for node in graph.connections}
a : dict[T, T | None] = {node: None for node in graph.connections}
a : MinPriorityQueue[T] = MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(_A , _A )
if priority_queue.is_empty():
return dist, parent
# initialization
a : int = priority_queue.extract_min()
a : Dict = 0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
a : Tuple = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(_A , dist[neighbour] )
a : Dict = node
# running prim's algorithm
while not priority_queue.is_empty():
a : List[str] = priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
a : int = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(_A , dist[neighbour] )
a : Optional[int] = node
return dist, parent
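# Usage sketch against the original, pre-obfuscation names
# (GraphUndirectedWeighted / prims_algo):
#   graph = GraphUndirectedWeighted[int]()
#   graph.add_edge(1, 2, 3)
#   graph.add_edge(2, 3, 10)
#   dist, parent = prims_algo(graph)
#   # dist holds each node's connection cost and parent encodes the tree edges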
| 195
| 1
|
'''simple docstring'''
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class SCREAMING_SNAKE_CASE__ :
def __init__( self : str , a_ : Any , a_ : Union[str, Any]=13 , a_ : Any=7 , a_ : Any=True , a_ : Dict=True , a_ : Union[str, Any]=False , a_ : Tuple=True , a_ : str=99 , a_ : Tuple=64 , a_ : Tuple=5 , a_ : Union[str, Any]=4 , a_ : Dict=64 , a_ : Union[str, Any]="gelu" , a_ : Dict=0.1 , a_ : List[str]=0.1 , a_ : Dict=512 , a_ : Tuple=16 , a_ : str=2 , a_ : Any=0.02 , a_ : List[Any]=3 , a_ : Tuple=4 , a_ : Optional[int]=None , ):
"""simple docstring"""
__snake_case = parent
__snake_case = batch_size
__snake_case = seq_length
__snake_case = is_training
__snake_case = use_input_mask
__snake_case = use_token_type_ids
__snake_case = use_labels
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = type_sequence_label_size
__snake_case = initializer_range
__snake_case = num_labels
__snake_case = num_choices
__snake_case = scope
def A ( self : int ):
"""simple docstring"""
return MPNetConfig.from_pretrained("microsoft/mpnet-base" )
def A ( self : str ):
"""simple docstring"""
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case = None
if self.use_input_mask:
__snake_case = random_attention_mask([self.batch_size, self.seq_length] )
__snake_case = None
__snake_case = None
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__snake_case = ids_tensor([self.batch_size] , self.num_choices )
__snake_case = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def A ( self : List[str] ):
"""simple docstring"""
return MPNetConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def A ( self : Tuple , a_ : int , a_ : str , a_ : Optional[int] , a_ : List[Any] , a_ : str , a_ : Optional[Any] ):
"""simple docstring"""
__snake_case = MPNetModel(config=a_ )
model.to(a_ )
model.eval()
__snake_case = model(a_ , a_ )
__snake_case = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def A ( self : Any , a_ : int , a_ : Tuple , a_ : str , a_ : int , a_ : str , a_ : List[Any] ):
"""simple docstring"""
__snake_case = MPNetForQuestionAnswering(config=a_ )
model.to(a_ )
model.eval()
__snake_case = model(
a_ , attention_mask=a_ , start_positions=a_ , end_positions=a_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A ( self : Any , a_ : Any , a_ : int , a_ : Union[str, Any] , a_ : Dict , a_ : Optional[Any] , a_ : Any ):
"""simple docstring"""
__snake_case = self.num_labels
__snake_case = MPNetForSequenceClassification(a_ )
model.to(a_ )
model.eval()
__snake_case = model(a_ , attention_mask=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A ( self : Optional[Any] , a_ : Any , a_ : Union[str, Any] , a_ : Union[str, Any] , a_ : Union[str, Any] , a_ : List[Any] , a_ : List[Any] ):
"""simple docstring"""
__snake_case = self.num_choices
__snake_case = MPNetForMultipleChoice(config=a_ )
model.to(a_ )
model.eval()
__snake_case = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__snake_case = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__snake_case = model(
a_ , attention_mask=a_ , labels=a_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A ( self : Dict , a_ : List[str] , a_ : str , a_ : Union[str, Any] , a_ : str , a_ : Optional[int] , a_ : Optional[Any] ):
"""simple docstring"""
__snake_case = self.num_labels
__snake_case = MPNetForTokenClassification(config=a_ )
model.to(a_ )
model.eval()
__snake_case = model(a_ , attention_mask=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A ( self : List[Any] ):
"""simple docstring"""
__snake_case = self.prepare_config_and_inputs()
((__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case)) = config_and_inputs
__snake_case = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE = (
(
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
if is_torch_available()
else ()
)
__SCREAMING_SNAKE_CASE = (
{
"""feature-extraction""": MPNetModel,
"""fill-mask""": MPNetForMaskedLM,
"""question-answering""": MPNetForQuestionAnswering,
"""text-classification""": MPNetForSequenceClassification,
"""token-classification""": MPNetForTokenClassification,
"""zero-shot""": MPNetForSequenceClassification,
}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = True
def A ( self : List[Any] ):
"""simple docstring"""
__snake_case = MPNetModelTester(self )
__snake_case = ConfigTester(self , config_class=a_ , hidden_size=37 )
def A ( self : List[Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def A ( self : List[Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_model(*a_ )
def A ( self : Dict ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_sequence_classification(*a_ )
def A ( self : List[Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_multiple_choice(*a_ )
def A ( self : int ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_token_classification(*a_ )
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_question_answering(*a_ )
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@slow
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case = MPNetModel.from_pretrained("microsoft/mpnet-base" )
__snake_case = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
__snake_case = model(a_ )[0]
__snake_case = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , a_ )
__snake_case = torch.tensor(
[[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]] )
# compare the actual values for a slice.
self.assertTrue(torch.allclose(output[:, :3, :3] , a_ , atol=1e-4 ) )
| 69
|
'''simple docstring'''
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
PREFIX = "https://openaipublic.azureedge.net/jukebox/models/"
MODEL_MAPPING = {
"jukebox-1b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"1b_lyrics/prior_level_2.pth.tar",
],
"jukebox-5b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"5b_lyrics/prior_level_2.pth.tar",
],
}
def replace_key ( key ):
if key.endswith(".model.1.bias" ) and len(key.split("." ) ) > 10:
A__ = key.replace(".model.1.bias" , ".conv1d_1.bias" )
elif key.endswith(".model.1.weight" ) and len(key.split("." ) ) > 10:
A__ = key.replace(".model.1.weight" , ".conv1d_1.weight" )
elif key.endswith(".model.3.bias" ) and len(key.split("." ) ) > 10:
A__ = key.replace(".model.3.bias" , ".conv1d_2.bias" )
elif key.endswith(".model.3.weight" ) and len(key.split("." ) ) > 10:
A__ = key.replace(".model.3.weight" , ".conv1d_2.weight" )
if "conditioner_blocks.0." in key:
A__ = key.replace("conditioner_blocks.0" , "conditioner_blocks" )
if "prime_prior" in key:
A__ = key.replace("prime_prior" , "encoder" )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
A__ = key.replace(".emb." , "." )
if key.endswith("k" ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace(".k" , ".codebook" )
if "y_emb." in key:
return key.replace("y_emb." , "metadata_embedding." )
if "x_emb.emb." in key:
A__ = key.replace("0.x_emb.emb" , "embed_tokens" )
if "prime_state_ln" in key:
return key.replace("prime_state_ln" , "encoder.final_layer_norm" )
if ".ln" in key:
return key.replace(".ln" , ".layer_norm" )
if "_ln" in key:
return key.replace("_ln" , "_layer_norm" )
if "prime_state_proj" in key:
return key.replace("prime_state_proj" , "encoder.proj_in" )
if "prime_x_out" in key:
return key.replace("prime_x_out" , "encoder.lm_head" )
if "prior.x_out" in key:
return key.replace("x_out" , "fc_proj_out" )
if "x_emb" in key:
return key.replace("x_emb" , "embed_tokens" )
return key
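# Illustrative examples of the renamer above (hypothetical keys, not taken
# from a real checkpoint):
#   "encoders.0.level_blocks.0.k" -> "encoders.0.level_blocks.0.codebook"
#   "prior.x_out.weight"          -> "prior.fc_proj_out.weight"
#   "foo.prime_state_ln.weight"   -> "foo.encoder.final_layer_norm.weight"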
def fix_jukebox_keys ( state_dict , model_state_dict , key_prefix , mapping ):
    new_dict = {}
    import re
    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)" )
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)" )
    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key ):
            regex_match = re_encoder_block_conv_in.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] )
            re_new_key = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key , original_key )
        elif re_encoder_block_resnet.fullmatch(original_key ):
            regex_match = re_encoder_block_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] )
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key , original_key )
        elif re_encoder_block_proj_out.fullmatch(original_key ):
            regex_match = re_encoder_block_proj_out.match(original_key )
            groups = regex_match.groups()
            re_new_key = F"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key , original_key )
        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key ):
            regex_match = re_decoder_block_conv_out.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] ) - 2
            re_new_key = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key , original_key )
        elif re_decoder_block_resnet.fullmatch(original_key ):
            regex_match = re_decoder_block_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] ) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key , original_key )
        elif re_decoder_block_proj_in.fullmatch(original_key ):
            regex_match = re_decoder_block_proj_in.match(original_key )
            groups = regex_match.groups()
            re_new_key = F"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key , original_key )
        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key ):
            regex_match = re_prior_cond_conv_out.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[1] ) * 2 + int(groups[2] ) - 2
            re_new_key = F"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key , original_key )
        elif re_prior_cond_resnet.fullmatch(original_key ):
            regex_match = re_prior_cond_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[1] ) * 2 + int(groups[2] ) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = F"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key , original_key )
        elif re_prior_cond_proj_in.fullmatch(original_key ):
            regex_match = re_prior_cond_proj_in.match(original_key )
            groups = regex_match.groups()
            re_new_key = F"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key , original_key )
        # keep original key
        else:
            key = original_key
        key = replace_key(key )
        if key is None or F"{key_prefix}.{key}" not in model_state_dict:
            print(F"failed converting {original_key} to {key}, does not match" )
        # handle mismatched shape
        elif value.shape != model_state_dict[F"{key_prefix}.{key}"].shape:
            val = model_state_dict[F"{key_prefix}.{key}"]
            print(F"{original_key} -> {key}: shapes {val.shape} and {value.shape} do not match" )
            key = original_key
        mapping[key] = original_key
        new_dict[key] = value
    return new_dict
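# Note on the index arithmetic above: each original `model.<i>.<j>` position is
# flattened into one block index as `i * 2 + j` (encoder) or `i * 2 + j - 2`
# (decoder and prior conditioner); the `- 2` offset is assumed to skip the
# leading non-convolutional entries of those stacks.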
@torch.no_grad()
def convert_openai_checkpoint ( model_name=None , pytorch_dump_folder_path=None ):
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" ):
            r = requests.get(F"{PREFIX}{file}" , allow_redirects=True )
            os.makedirs(F"{pytorch_dump_folder_path}/" , exist_ok=True )
            open(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" , "wb" ).write(r.content )
    model_to_convert = MODEL_MAPPING[model_name.split("/" )[-1]]
    config = JukeboxConfig.from_pretrained(model_name )
    model = JukeboxModel(config )
    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert ):
        old_dic = torch.load(F"{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}" )["model"]
        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b" ):
                new_dic[k.replace("b" , "bias" )] = old_dic[k]
            elif k.endswith(".w" ):
                new_dic[k.replace("w" , "weight" )] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks." , ".model." )] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]
A__ = "vqvae" if i == 0 else F"priors.{3 - i}"
A__ = fix_jukebox_keys(_lowerCamelCase , model.state_dict() , _lowerCamelCase , _lowerCamelCase )
weight_dict.append(_lowerCamelCase )
A__ = weight_dict.pop(0 )
model.vqvae.load_state_dict(_lowerCamelCase )
for i in range(len(_lowerCamelCase ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
with open(F"{pytorch_dump_folder_path}/mapping.json" , "w" ) as txtfile:
json.dump(_lowerCamelCase , _lowerCamelCase )
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_lowerCamelCase )
return weight_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="jukebox-5b-lyrics",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="jukebox-5b-lyrics-converted",
type=str,
help="Path to the output PyTorch model directory.",
)
    args = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 440
| 0
|
'''simple docstring'''
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.de'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.en'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.fr'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.frr'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.it'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.simple'''},
{'''dataset''': '''snli''', '''config_name''': '''plain_text'''},
{'''dataset''': '''eli5''', '''config_name''': '''LFQA_reddit'''},
{'''dataset''': '''wiki40b''', '''config_name''': '''en'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.compressed'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.no_index'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.multiset.no_index'''},
{'''dataset''': '''natural_questions''', '''config_name''': '''default'''},
]
def list_datasets_on_hf_gcp_parameters ( with_config=True ):
'''simple docstring'''
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
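# For reference (illustrative, derived from the first entry above):
#   with_config=True  -> {"testcase_name": "wikipedia/20220301.de", "dataset": "wikipedia", "config_name": "20220301.de"}
#   with_config=False -> {"testcase_name": "wikipedia", "dataset": "wikipedia"}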
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True ) )
class TestDatasetOnHfGcp ( TestCase ):
    dataset = None
    config_name = None
    def test_dataset_info_available (self , dataset , config_name ):
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset , cache_dir=tmp_dir )
            builder_cls = import_main_class(dataset_module.module_path , dataset=True )
            builder_instance: DatasetBuilder = builder_cls(
                cache_dir=tmp_dir , config_name=config_name , hash=dataset_module.hash , )
            dataset_info_url = """/""".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False ).replace(os.sep , """/""" ),
                    config.DATASET_INFO_FILENAME,
                ] )
            dataset_info_path = cached_path(dataset_info_url , cache_dir=tmp_dir )
            self.assertTrue(os.path.exists(dataset_info_path ) )
@pytest.mark.integration
def test_as_dataset_from_hf_gcs ( tmp_path_factory ):
    '''simple docstring'''
    tmp_dir = tmp_path_factory.mktemp("""test_hf_gcp""" ) / """test_wikipedia_simple"""
    dataset_module = dataset_module_factory("""wikipedia""" , cache_dir=tmp_dir )
    builder_cls = import_main_class(dataset_module.module_path )
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_dir , config_name="""20220301.frr""" , hash=dataset_module.hash , )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def test_as_streaming_dataset_from_hf_gcs ( tmp_path ):
    '''simple docstring'''
    dataset_module = dataset_module_factory("""wikipedia""" , cache_dir=tmp_path )
    builder_cls = import_main_class(dataset_module.module_path , dataset=True )
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_path , config_name="""20220301.frr""" , hash=dataset_module.hash , )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds , IterableDatasetDict )
    assert "train" in ds
    assert isinstance(ds["""train"""] , IterableDataset )
    assert next(iter(ds["""train"""] ) )
| 686
|
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger('''transformers.models.encodec''')
MAPPING_QUANTIZER = {
'''quantizer.vq.layers.*._codebook.inited''': '''quantizer.layers.*.codebook.inited''',
'''quantizer.vq.layers.*._codebook.cluster_size''': '''quantizer.layers.*.codebook.cluster_size''',
'''quantizer.vq.layers.*._codebook.embed''': '''quantizer.layers.*.codebook.embed''',
'''quantizer.vq.layers.*._codebook.embed_avg''': '''quantizer.layers.*.codebook.embed_avg''',
}
MAPPING_ENCODER = {
'''encoder.model.0.conv.conv''': '''encoder.layers.0.conv''',
'''encoder.model.1.block.1.conv.conv''': '''encoder.layers.1.block.1.conv''',
'''encoder.model.1.block.3.conv.conv''': '''encoder.layers.1.block.3.conv''',
'''encoder.model.1.shortcut.conv.conv''': '''encoder.layers.1.shortcut.conv''',
'''encoder.model.3.conv.conv''': '''encoder.layers.3.conv''',
'''encoder.model.4.block.1.conv.conv''': '''encoder.layers.4.block.1.conv''',
'''encoder.model.4.block.3.conv.conv''': '''encoder.layers.4.block.3.conv''',
'''encoder.model.4.shortcut.conv.conv''': '''encoder.layers.4.shortcut.conv''',
'''encoder.model.6.conv.conv''': '''encoder.layers.6.conv''',
'''encoder.model.7.block.1.conv.conv''': '''encoder.layers.7.block.1.conv''',
'''encoder.model.7.block.3.conv.conv''': '''encoder.layers.7.block.3.conv''',
'''encoder.model.7.shortcut.conv.conv''': '''encoder.layers.7.shortcut.conv''',
'''encoder.model.9.conv.conv''': '''encoder.layers.9.conv''',
'''encoder.model.10.block.1.conv.conv''': '''encoder.layers.10.block.1.conv''',
'''encoder.model.10.block.3.conv.conv''': '''encoder.layers.10.block.3.conv''',
'''encoder.model.10.shortcut.conv.conv''': '''encoder.layers.10.shortcut.conv''',
'''encoder.model.12.conv.conv''': '''encoder.layers.12.conv''',
'''encoder.model.13.lstm''': '''encoder.layers.13.lstm''',
'''encoder.model.15.conv.conv''': '''encoder.layers.15.conv''',
}
MAPPING_ENCODER_48K = {
'''encoder.model.0.conv.norm''': '''encoder.layers.0.norm''',
'''encoder.model.1.block.1.conv.norm''': '''encoder.layers.1.block.1.norm''',
'''encoder.model.1.block.3.conv.norm''': '''encoder.layers.1.block.3.norm''',
'''encoder.model.1.shortcut.conv.norm''': '''encoder.layers.1.shortcut.norm''',
'''encoder.model.3.conv.norm''': '''encoder.layers.3.norm''',
'''encoder.model.4.block.1.conv.norm''': '''encoder.layers.4.block.1.norm''',
'''encoder.model.4.block.3.conv.norm''': '''encoder.layers.4.block.3.norm''',
'''encoder.model.4.shortcut.conv.norm''': '''encoder.layers.4.shortcut.norm''',
'''encoder.model.6.conv.norm''': '''encoder.layers.6.norm''',
'''encoder.model.7.block.1.conv.norm''': '''encoder.layers.7.block.1.norm''',
'''encoder.model.7.block.3.conv.norm''': '''encoder.layers.7.block.3.norm''',
'''encoder.model.7.shortcut.conv.norm''': '''encoder.layers.7.shortcut.norm''',
'''encoder.model.9.conv.norm''': '''encoder.layers.9.norm''',
'''encoder.model.10.block.1.conv.norm''': '''encoder.layers.10.block.1.norm''',
'''encoder.model.10.block.3.conv.norm''': '''encoder.layers.10.block.3.norm''',
'''encoder.model.10.shortcut.conv.norm''': '''encoder.layers.10.shortcut.norm''',
'''encoder.model.12.conv.norm''': '''encoder.layers.12.norm''',
'''encoder.model.15.conv.norm''': '''encoder.layers.15.norm''',
}
MAPPING_DECODER = {
'''decoder.model.0.conv.conv''': '''decoder.layers.0.conv''',
'''decoder.model.1.lstm''': '''decoder.layers.1.lstm''',
'''decoder.model.3.convtr.convtr''': '''decoder.layers.3.conv''',
'''decoder.model.4.block.1.conv.conv''': '''decoder.layers.4.block.1.conv''',
'''decoder.model.4.block.3.conv.conv''': '''decoder.layers.4.block.3.conv''',
'''decoder.model.4.shortcut.conv.conv''': '''decoder.layers.4.shortcut.conv''',
'''decoder.model.6.convtr.convtr''': '''decoder.layers.6.conv''',
'''decoder.model.7.block.1.conv.conv''': '''decoder.layers.7.block.1.conv''',
'''decoder.model.7.block.3.conv.conv''': '''decoder.layers.7.block.3.conv''',
'''decoder.model.7.shortcut.conv.conv''': '''decoder.layers.7.shortcut.conv''',
'''decoder.model.9.convtr.convtr''': '''decoder.layers.9.conv''',
'''decoder.model.10.block.1.conv.conv''': '''decoder.layers.10.block.1.conv''',
'''decoder.model.10.block.3.conv.conv''': '''decoder.layers.10.block.3.conv''',
'''decoder.model.10.shortcut.conv.conv''': '''decoder.layers.10.shortcut.conv''',
'''decoder.model.12.convtr.convtr''': '''decoder.layers.12.conv''',
'''decoder.model.13.block.1.conv.conv''': '''decoder.layers.13.block.1.conv''',
'''decoder.model.13.block.3.conv.conv''': '''decoder.layers.13.block.3.conv''',
'''decoder.model.13.shortcut.conv.conv''': '''decoder.layers.13.shortcut.conv''',
'''decoder.model.15.conv.conv''': '''decoder.layers.15.conv''',
}
MAPPING_DECODER_48K = {
'''decoder.model.0.conv.norm''': '''decoder.layers.0.norm''',
'''decoder.model.3.convtr.norm''': '''decoder.layers.3.norm''',
'''decoder.model.4.block.1.conv.norm''': '''decoder.layers.4.block.1.norm''',
'''decoder.model.4.block.3.conv.norm''': '''decoder.layers.4.block.3.norm''',
'''decoder.model.4.shortcut.conv.norm''': '''decoder.layers.4.shortcut.norm''',
'''decoder.model.6.convtr.norm''': '''decoder.layers.6.norm''',
'''decoder.model.7.block.1.conv.norm''': '''decoder.layers.7.block.1.norm''',
'''decoder.model.7.block.3.conv.norm''': '''decoder.layers.7.block.3.norm''',
'''decoder.model.7.shortcut.conv.norm''': '''decoder.layers.7.shortcut.norm''',
'''decoder.model.9.convtr.norm''': '''decoder.layers.9.norm''',
'''decoder.model.10.block.1.conv.norm''': '''decoder.layers.10.block.1.norm''',
'''decoder.model.10.block.3.conv.norm''': '''decoder.layers.10.block.3.norm''',
'''decoder.model.10.shortcut.conv.norm''': '''decoder.layers.10.shortcut.norm''',
'''decoder.model.12.convtr.norm''': '''decoder.layers.12.norm''',
'''decoder.model.13.block.1.conv.norm''': '''decoder.layers.13.block.1.norm''',
'''decoder.model.13.block.3.conv.norm''': '''decoder.layers.13.block.3.norm''',
'''decoder.model.13.shortcut.conv.norm''': '''decoder.layers.13.shortcut.norm''',
'''decoder.model.15.conv.norm''': '''decoder.layers.15.norm''',
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively ( hf_pointer , key , value , full_name , weight_type ):
    '''simple docstring'''
    for attribute in key.split(""".""" ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f' {value.shape} for {full_name}' )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value
    logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.' )
def should_ignore ( name , ignore_keys ):
    '''simple docstring'''
    for key in ignore_keys:
        if key.endswith(""".*""" ):
            if name.startswith(key[:-1] ):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(""".*.""" )
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
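# Illustrative examples (hypothetical keys and patterns, not from IGNORE_KEYS):
#   should_ignore("encoder.model.0.conv.bias" , ["encoder.model.0.*"] )   -> True  (prefix wildcard)
#   should_ignore("decoder.model.1.lstm.bias" , ["decoder.*.lstm"] )      -> True  (infix wildcard)
#   should_ignore("quantizer.layers.0.codebook" , ["encoder.model.0.*"] ) -> False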
def recursively_load_weights ( orig_dict , hf_model , model_name ):
    '''simple docstring'''
    unused_weights = []
    if model_name in ["encodec_24khz", "encodec_32khz"]:
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
else:
raise ValueError(f'Unsupported model: {model_name}' )
for name, value in orig_dict.items():
        if should_ignore(name , IGNORE_KEYS ):
logger.info(f'{name} was ignored' )
continue
        is_used = False
for key, mapped_key in MAPPING.items():
if "*" in key:
A_, A_ : List[Any] = key.split(""".*.""" )
if prefix in name and suffix in name:
A_ : Optional[Any] = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith("""embed""" ) and name.endswith("""embed_avg""" ):
continue
                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key )[0].split(""".""" )[-2]
                    mapped_key = mapped_key.replace("""*""" , layer_index )
if "weight_g" in name:
A_ : Any = """weight_g"""
elif "weight_v" in name:
A_ : Tuple = """weight_v"""
elif "weight_ih_l0" in name:
A_ : Union[str, Any] = """weight_ih_l0"""
elif "weight_hh_l0" in name:
A_ : Tuple = """weight_hh_l0"""
elif "bias_ih_l0" in name:
A_ : str = """bias_ih_l0"""
elif "bias_hh_l0" in name:
A_ : List[Any] = """bias_hh_l0"""
elif "weight_ih_l1" in name:
A_ : Dict = """weight_ih_l1"""
elif "weight_hh_l1" in name:
A_ : Any = """weight_hh_l1"""
elif "bias_ih_l1" in name:
A_ : Optional[int] = """bias_ih_l1"""
elif "bias_hh_l1" in name:
A_ : List[Any] = """bias_hh_l1"""
elif "bias" in name:
A_ : List[str] = """bias"""
elif "weight" in name:
A_ : Optional[int] = """weight"""
elif "running_mean" in name:
A_ : Union[str, Any] = """running_mean"""
elif "running_var" in name:
A_ : Optional[int] = """running_var"""
elif "num_batches_tracked" in name:
A_ : List[Any] = """num_batches_tracked"""
else:
A_ : str = None
set_recursively(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
continue
if not is_used:
            unused_weights.append(name )
logger.warning(f'Unused weights: {unused_weights}' )
@torch.no_grad()
def convert_checkpoint ( model_name , checkpoint_path , pytorch_dump_folder_path , config_path=None , repo_id=None , ):
    '''simple docstring'''
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path )
    else:
        config = EncodecConfig()
    if model_name == "encodec_24khz":
        pass # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 3_20_00
        config.codebook_size = 20_48
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 4_80_00
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = """time_group_norm"""
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f'Unknown model name: {model_name}' )
    model = EncodecModel(config )
    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
    feature_extractor.save_pretrained(pytorch_dump_folder_path )
    original_checkpoint = torch.load(checkpoint_path )
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["""best_state"""]
    recursively_load_weights(original_checkpoint , model , model_name )
    model.save_pretrained(pytorch_dump_folder_path )
    if repo_id:
        print("""Pushing to the hub...""" )
        feature_extractor.push_to_hub(repo_id )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model''',
default='''encodec_24khz''',
type=str,
help='''The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
    args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 686
| 1
|
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
def electric_power ( voltage: float , current: float , power: float ):
    result = namedtuple("""result""" , """name value""" )
if (voltage, current, power).count(0 ) != 1:
raise ValueError("""Only one argument must be 0""" )
elif power < 0:
raise ValueError(
"""Power cannot be negative in any electrical/electronics system""" )
elif voltage == 0:
return result("""voltage""" , power / current )
elif current == 0:
return result("""current""" , power / voltage )
elif power == 0:
return result("""power""" , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 578
|
'''simple docstring'''
import pytest
DATASET_LOADING_SCRIPT_NAME = '''__dummy_dataset1__'''
DATASET_LOADING_SCRIPT_CODE = '''
import json
import os
import datasets
REPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"
URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string")),
"ner_tags": datasets.Sequence(
datasets.features.ClassLabel(
names=[
"O",
"B-PER",
"I-PER",
"B-ORG",
"I-ORG",
"B-LOC",
"I-LOC",
]
)
),
"langs": datasets.Sequence(datasets.Value("string")),
"spans": datasets.Sequence(datasets.Value("string")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),
]
def _generate_examples(self, filepath):
with open(filepath, "r", encoding="utf-8") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
'''
@pytest.fixture
def dataset_loading_script_name ( ):
    return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def dataset_loading_script_code ( ):
    return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def dataset_loading_script_dir ( dataset_loading_script_name , dataset_loading_script_code , tmp_path ):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / """datasets""" / script_name
    script_dir.mkdir(parents=True )
    script_path = script_dir / F"{script_name}.py"
    with open(script_path , """w""" ) as f:
        f.write(dataset_loading_script_code )
    return str(script_dir )
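# Usage sketch (illustrative): a test that takes `dataset_loading_script_dir`
# as an argument receives the path to a temporary `datasets/__dummy_dataset1__`
# directory containing `__dummy_dataset1__.py` with the script code above.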
| 578
| 1
|
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_A = logging.get_logger(__name__)
_A = {
'''microsoft/conditional-detr-resnet-50''': (
'''https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'''
),
}
class _lowerCAmelCase ( snake_case__ ):
_lowercase ='''conditional_detr'''
_lowercase =['''past_key_values''']
_lowercase ={
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self , _UpperCamelCase=True , _UpperCamelCase=None , _UpperCamelCase=3 , _UpperCamelCase=300 , _UpperCamelCase=6 , _UpperCamelCase=2_048 , _UpperCamelCase=8 , _UpperCamelCase=6 , _UpperCamelCase=2_048 , _UpperCamelCase=8 , _UpperCamelCase=0.0 , _UpperCamelCase=0.0 , _UpperCamelCase=True , _UpperCamelCase="relu" , _UpperCamelCase=256 , _UpperCamelCase=0.1 , _UpperCamelCase=0.0 , _UpperCamelCase=0.0 , _UpperCamelCase=0.02 , _UpperCamelCase=1.0 , _UpperCamelCase=False , _UpperCamelCase="sine" , _UpperCamelCase="resnet50" , _UpperCamelCase=True , _UpperCamelCase=False , _UpperCamelCase=2 , _UpperCamelCase=5 , _UpperCamelCase=2 , _UpperCamelCase=1 , _UpperCamelCase=1 , _UpperCamelCase=2 , _UpperCamelCase=5 , _UpperCamelCase=2 , _UpperCamelCase=0.25 , **_UpperCamelCase , ) -> Any:
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can\'t specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
lowerCAmelCase_ = CONFIG_MAPPING['resnet'](out_features=["stage4"] )
elif isinstance(_A , _A ):
lowerCAmelCase_ = backbone_config.get("model_type" )
lowerCAmelCase_ = CONFIG_MAPPING[backbone_model_type]
lowerCAmelCase_ = config_class.from_dict(_A )
lowerCAmelCase_ = use_timm_backbone
lowerCAmelCase_ = backbone_config
lowerCAmelCase_ = num_channels
lowerCAmelCase_ = num_queries
lowerCAmelCase_ = d_model
lowerCAmelCase_ = encoder_ffn_dim
lowerCAmelCase_ = encoder_layers
lowerCAmelCase_ = encoder_attention_heads
lowerCAmelCase_ = decoder_ffn_dim
lowerCAmelCase_ = decoder_layers
lowerCAmelCase_ = decoder_attention_heads
lowerCAmelCase_ = dropout
lowerCAmelCase_ = attention_dropout
lowerCAmelCase_ = activation_dropout
lowerCAmelCase_ = activation_function
lowerCAmelCase_ = init_std
lowerCAmelCase_ = init_xavier_std
lowerCAmelCase_ = encoder_layerdrop
lowerCAmelCase_ = decoder_layerdrop
lowerCAmelCase_ = encoder_layers
lowerCAmelCase_ = auxiliary_loss
lowerCAmelCase_ = position_embedding_type
lowerCAmelCase_ = backbone
lowerCAmelCase_ = use_pretrained_backbone
lowerCAmelCase_ = dilation
# Hungarian matcher
lowerCAmelCase_ = class_cost
lowerCAmelCase_ = bbox_cost
lowerCAmelCase_ = giou_cost
# Loss coefficients
lowerCAmelCase_ = mask_loss_coefficient
lowerCAmelCase_ = dice_loss_coefficient
lowerCAmelCase_ = cls_loss_coefficient
lowerCAmelCase_ = bbox_loss_coefficient
lowerCAmelCase_ = giou_loss_coefficient
lowerCAmelCase_ = focal_alpha
super().__init__(is_encoder_decoder=_A , **_A )
@property
def __a ( self ) -> int:
return self.encoder_attention_heads
@property
def __a ( self ) -> int:
return self.d_model
def __a ( self ) -> int:
lowerCAmelCase_ = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
lowerCAmelCase_ = self.backbone_config.to_dict()
lowerCAmelCase_ = self.__class__.model_type
return output
class _lowerCAmelCase ( snake_case__ ):
_lowercase =version.parse('''1.11''' )
@property
def __a ( self ) -> str:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def __a ( self ) -> Optional[Any]:
return 1e-5
@property
def __a ( self ) -> Optional[Any]:
return 12
| 711
|
def catalan_numbers ( upper_limit: int ):
    """simple docstring"""
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0" )
    catalan_list = [0] * (upper_limit + 1)
    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1
    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i - 1
    for i in range(2 , upper_limit + 1 ):
        for j in range(i ):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
    return catalan_list
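# Worked example (illustrative): catalan_numbers(5) -> [1, 1, 2, 5, 14, 42],
# since e.g. C(3) = C(0)C(2) + C(1)C(1) + C(2)C(0) = 2 + 1 + 2 = 5.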
if __name__ == "__main__":
print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
print("\n*** Enter -1 at any time to quit ***")
print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
try:
while True:
            N = int(input().strip())
if N < 0:
print("\n********* Goodbye!! ************")
break
else:
print(f"""The Catalan numbers from 0 through {N} are:""")
print(catalan_numbers(N))
print("Try another upper limit for the sequence: ", end="")
except (NameError, ValueError):
print("\n********* Invalid input, goodbye! ************\n")
import doctest
doctest.testmod()
| 279
| 0
|
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class SCREAMING_SNAKE_CASE ( __a , unittest.TestCase ):
"""simple docstring"""
__A = MvpTokenizer
__A = MvpTokenizerFast
__A = True
__A = filter_roberta_detectors
def a ( self : List[str] ):
"""simple docstring"""
super().setUp()
_lowerCAmelCase = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
_lowerCAmelCase = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) )
_lowerCAmelCase = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
_lowerCAmelCase = {'unk_token': '<unk>'}
_lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
_lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(__lowerCAmelCase ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(__lowerCAmelCase ) )
def a ( self : Tuple , **__lowerCAmelCase : Dict ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def a ( self : Any , **__lowerCAmelCase : Any ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def a ( self : Dict , __lowerCAmelCase : Any ):
"""simple docstring"""
return "lower newer", "lower newer"
@cached_property
def a ( self : Optional[int] ):
"""simple docstring"""
return MvpTokenizer.from_pretrained('RUCAIBox/mvp' )
@cached_property
def a ( self : int ):
"""simple docstring"""
return MvpTokenizerFast.from_pretrained('RUCAIBox/mvp' )
@require_torch
def a ( self : Any ):
"""simple docstring"""
_lowerCAmelCase = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_lowerCAmelCase = [0, 250, 251, 1_7818, 13, 3_9186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowerCAmelCase = tokenizer(__lowerCAmelCase , max_length=len(__lowerCAmelCase ) , padding=__lowerCAmelCase , return_tensors='pt' )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
_lowerCAmelCase = batch.input_ids.tolist()[0]
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
# Test that special tokens are reset
@require_torch
def a ( self : Optional[int] ):
"""simple docstring"""
_lowerCAmelCase = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowerCAmelCase = tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase , return_tensors='pt' )
# check if input_ids are returned and no labels
self.assertIn('input_ids' , __lowerCAmelCase )
self.assertIn('attention_mask' , __lowerCAmelCase )
self.assertNotIn('labels' , __lowerCAmelCase )
self.assertNotIn('decoder_attention_mask' , __lowerCAmelCase )
@require_torch
def a ( self : Optional[Any] ):
"""simple docstring"""
_lowerCAmelCase = [
'Summary of the text.',
'Another summary.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowerCAmelCase = tokenizer(text_target=__lowerCAmelCase , max_length=32 , padding='max_length' , return_tensors='pt' )
self.assertEqual(32 , targets['input_ids'].shape[1] )
@require_torch
def a ( self : Optional[Any] ):
"""simple docstring"""
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowerCAmelCase = tokenizer(
['I am a small frog' * 1024, 'I am a small frog'] , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , return_tensors='pt' )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
self.assertEqual(batch.input_ids.shape , (2, 1024) )
@require_torch
def a ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCAmelCase = ['A long paragraph for summarization.']
_lowerCAmelCase = [
'Summary of the text.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowerCAmelCase = tokenizer(__lowerCAmelCase , text_target=__lowerCAmelCase , return_tensors='pt' )
_lowerCAmelCase = inputs['input_ids']
_lowerCAmelCase = inputs['labels']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
def a ( self : int ):
"""simple docstring"""
pass
def a ( self : Any ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
_lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
_lowerCAmelCase = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
_lowerCAmelCase = 'A, <mask> AllenNLP sentence.'
_lowerCAmelCase = tokenizer_r.encode_plus(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase )
_lowerCAmelCase = tokenizer_p.encode_plus(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
_lowerCAmelCase = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
_lowerCAmelCase = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
            # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
__lowerCAmelCase , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
__lowerCAmelCase , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
| 309
|
'''simple docstring'''
from __future__ import annotations
import numpy as np
def relu ( vector: list[float] ):
    return np.maximum(0 , vector )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 309
| 1
|
"""simple docstring"""
def gray_code ( bit_count: int ):
    '''simple docstring'''
    if bit_count < 0:
        raise ValueError("The given input must be non-negative" )
    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count )
    # convert the bit strings to integers
    for i in range(len(sequence ) ):
        sequence[i] = int(sequence[i] , 2 )
    return sequence
def gray_code_sequence_string ( bit_count: int ):
    '''simple docstring'''
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]
    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n
    # the recursive call generates the answer for n - 1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1 )
    sequence = []
    # append 0 to the first half of the smaller sequence generated
    for i in range(seq_len // 2 ):
        sequence.append("0" + smaller_sequence[i] )
    # append 1 to the second half ... start from the end of the list
    for i in reversed(range(seq_len // 2 ) ):
        sequence.append("1" + smaller_sequence[i] )
    return sequence
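# Worked example (illustrative): gray_code_sequence_string(2) yields
# ["00", "01", "11", "10"], so gray_code(2) -> [0, 1, 3, 2]; consecutive
# values differ in exactly one bit.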
if __name__ == "__main__":
import doctest
doctest.testmod()
| 716
|
"""simple docstring"""
def manhattan_distance ( point_a: list , point_b: list ):
    '''simple docstring'''
    _validate_point(point_a )
    _validate_point(point_b )
    if len(point_a ) != len(point_b ):
        raise ValueError("Both points must be in the same n-dimensional space" )
    return float(sum(abs(a - b ) for a, b in zip(point_a , point_b ) ) )
def _validate_point ( point: list[float] ):
    '''simple docstring'''
    if point:
        if isinstance(point , list ):
            for item in point:
                if not isinstance(item , (int, float) ):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f'''{type(item ).__name__}'''
                    )
                    raise TypeError(msg )
        else:
            msg = f'''Expected a list of numbers as input, found {type(point ).__name__}'''
            raise TypeError(msg )
    else:
        raise ValueError("Missing an input" )
def manhattan_distance_one_liner ( point_a: list , point_b: list ):
    '''simple docstring'''
    _validate_point(point_a )
    _validate_point(point_b )
    if len(point_a ) != len(point_b ):
        raise ValueError("Both points must be in the same n-dimensional space" )
    return float(sum(abs(x - y ) for x, y in zip(point_a , point_b ) ) )
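# Worked example (illustrative): manhattan_distance([1, 1] , [4, 5] ) -> 7.0
# (|1 - 4| + |1 - 5| = 3 + 4), and the one-liner variant agrees.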
if __name__ == "__main__":
import doctest
doctest.testmod()
| 468
| 0
|
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=_UpperCAmelCase )
class a_ ( _UpperCAmelCase ):
a : str = field(default='audio-classification' , metadata={'include_in_asdict_even_if_is_default': True} )
a : ClassVar[Features] = Features({'audio': Audio()} )
a : ClassVar[Features] = Features({'labels': ClassLabel} )
a : str = "audio"
a : str = "labels"
def _snake_case ( self : Tuple , __UpperCamelCase : Dict ) ->List[str]:
'''simple docstring'''
if self.label_column not in features:
raise ValueError(f"""Column {self.label_column} is not present in features.""" )
if not isinstance(features[self.label_column] , __UpperCamelCase ):
raise ValueError(f"""Column {self.label_column} is not a ClassLabel.""" )
_UpperCAmelCase = copy.deepcopy(self )
_UpperCAmelCase = self.label_schema.copy()
_UpperCAmelCase = features[self.label_column]
_UpperCAmelCase = label_schema
return task_template
@property
def _snake_case ( self : Any ) ->Dict[str, str]:
'''simple docstring'''
return {
self.audio_column: "audio",
self.label_column: "labels",
}
| 555
|
"""simple docstring"""
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
a : int = '''scheduler_config.json'''
class a_ ( _UpperCAmelCase ):
a : List[Any] = 1
a : Tuple = 2
a : Dict = 3
a : str = 4
a : Optional[int] = 5
a : Any = 6
a : int = 7
a : Any = 8
a : List[Any] = 9
a : Any = 10
a : List[str] = 11
a : Optional[int] = 12
a : Any = 13
a : Dict = 14
@dataclass
class a_ ( _UpperCAmelCase ):
a : torch.FloatTensor
class a_ :
a : Dict = SCHEDULER_CONFIG_NAME
a : List[str] = []
a : Optional[int] = True
@classmethod
def _snake_case ( cls : List[str] , __UpperCamelCase : Dict[str, Any] = None , __UpperCamelCase : Optional[str] = None , __UpperCamelCase : List[str]=False , **__UpperCamelCase : int , ) ->Dict:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = cls.load_config(
pretrained_model_name_or_path=__UpperCamelCase , subfolder=__UpperCamelCase , return_unused_kwargs=__UpperCamelCase , return_commit_hash=__UpperCamelCase , **__UpperCamelCase , )
return cls.from_config(__UpperCamelCase , return_unused_kwargs=__UpperCamelCase , **__UpperCamelCase )
def _snake_case ( self : Tuple , __UpperCamelCase : Union[str, os.PathLike] , __UpperCamelCase : bool = False , **__UpperCamelCase : List[str] ) ->Any:
'''simple docstring'''
self.save_config(save_directory=__UpperCamelCase , push_to_hub=__UpperCamelCase , **__UpperCamelCase )
@property
def _snake_case ( self : Optional[int] ) ->List[Any]:
'''simple docstring'''
return self._get_compatibles()
@classmethod
def _snake_case ( cls : List[str] ) ->Dict:
'''simple docstring'''
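        # Resolves the names in `_compatibles` (plus this class's own name) to
        # classes defined on the top-level package, keeping only those that exist.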
_UpperCAmelCase = list(set([cls.__name__] + cls._compatibles ) )
_UpperCAmelCase = importlib.import_module(__name__.split(""".""" )[0] )
_UpperCAmelCase = [
getattr(__UpperCamelCase , __UpperCamelCase ) for c in compatible_classes_str if hasattr(__UpperCamelCase , __UpperCamelCase )
]
return compatible_classes
| 555
| 1
|
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class a :
def __init__( self : Dict , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[str]=13 , lowerCamelCase_ : Tuple=32 , lowerCamelCase_ : Optional[int]=2 , lowerCamelCase_ : int=3 , lowerCamelCase_ : Union[str, Any]=16 , lowerCamelCase_ : List[str]=[32, 64, 1_28] , lowerCamelCase_ : Any=[1, 2, 1] , lowerCamelCase_ : Optional[Any]=[2, 2, 4] , lowerCamelCase_ : Tuple=2 , lowerCamelCase_ : List[str]=2.0 , lowerCamelCase_ : List[str]=True , lowerCamelCase_ : Dict=0.0 , lowerCamelCase_ : Optional[int]=0.0 , lowerCamelCase_ : Tuple=0.1 , lowerCamelCase_ : int="gelu" , lowerCamelCase_ : Tuple=False , lowerCamelCase_ : Tuple=True , lowerCamelCase_ : Any=0.02 , lowerCamelCase_ : int=1E-5 , lowerCamelCase_ : str=True , lowerCamelCase_ : Union[str, Any]=None , lowerCamelCase_ : List[str]=True , lowerCamelCase_ : Any=10 , lowerCamelCase_ : Optional[Any]=8 , lowerCamelCase_ : Any=["stage1", "stage2"] , lowerCamelCase_ : Tuple=[1, 2] , ) -> Optional[int]:
__a = parent
__a = batch_size
__a = image_size
__a = patch_size
__a = num_channels
__a = embed_dim
__a = hidden_sizes
__a = depths
__a = num_heads
__a = window_size
__a = mlp_ratio
__a = qkv_bias
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = drop_path_rate
__a = hidden_act
__a = use_absolute_embeddings
__a = patch_norm
__a = layer_norm_eps
__a = initializer_range
__a = is_training
__a = scope
__a = use_labels
__a = type_sequence_label_size
__a = encoder_stride
__a = out_features
__a = out_indices
def lowerCAmelCase_ ( self : int ) -> Union[str, Any]:
__a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase_ ( self : List[Any] ) -> Any:
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def lowerCAmelCase_ ( self : Tuple , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Any ) -> Tuple:
__a = FocalNetModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
__a = model(lowerCamelCase_ )
__a = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
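        # e.g. with image_size=32, patch_size=2 and depths=[1, 2, 1]: (32 // 2) ** 2 // 4 ** 2 = 16 tokens at the final stage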
__a = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def lowerCAmelCase_ ( self : List[str] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[Any] ) -> Union[str, Any]:
__a = FocalNetBackbone(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
__a = model(lowerCamelCase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
__a = None
__a = FocalNetBackbone(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
__a = model(lowerCamelCase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def lowerCAmelCase_ ( self : Any , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Optional[Any] ) -> List[str]:
__a = FocalNetForMaskedImageModeling(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
__a = model(lowerCamelCase_ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__a = 1
__a = FocalNetForMaskedImageModeling(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
__a = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__a = model(lowerCamelCase_ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
    def create_and_check_for_image_classification(self , config , pixel_values , labels ):
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common(self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class FocalNetModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {'''feature-extraction''': FocalNetModel, '''image-classification''': FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self ):
        self.model_tester = FocalNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=FocalNetConfig , embed_dim=37 , has_text_modality=False )

    def test_config(self ):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self ):
        return

    def test_model(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_backbone(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs )

    def test_for_masked_image_modeling(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs )

    def test_for_image_classification(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )

    @unittest.skip(reason="""FocalNet does not use inputs_embeds""" )
    def test_inputs_embeds(self ):
        pass

    @unittest.skip(reason="""FocalNet does not use feedforward chunking""" )
    def test_feed_forward_chunking(self ):
        pass

    def test_model_common_attributes(self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes[:-1]:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )

    def test_forward_signature(self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes[:-1]:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def check_hidden_states_output(self , inputs_dict , config , model_class , image_size ):
        model = model_class(config )
        model.to(torch_device )
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(hidden_states ) , expected_num_layers )
        # FocalNet has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states ) , expected_num_layers )
        batch_size , num_channels , height , width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size , num_channels , height * width ).permute(0 , 2 , 1 )
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
    def test_hidden_states_output(self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict , config , model_class , image_size )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict , config , model_class , image_size )
    def test_hidden_states_output_with_padding(self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width) )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width) )
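    # Worked example of the padding arithmetic above (hypothetical sizes): with
    # image_size=(30, 30) and patch_size=(4, 4), padded_height = 30 + 4 - (30 % 4) = 32,
    # i.e. the spatial size is rounded up to the next multiple of the patch size.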
@slow
    def test_model_from_pretrained(self ):
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FocalNetModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_initialization(self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class FocalNetModelIntegrationTest( unittest.TestCase ):
@cached_property
    def default_image_processor(self ):
        # TODO update organization
        return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""" ) if is_vision_available() else None
@slow
    def test_inference_image_classification_head(self ):
        model = FocalNetForImageClassification.from_pretrained("""microsoft/focalnet-tiny""" ).to(torch_device )
        image_processor = self.default_image_processor
        image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 10_00) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([0.21_66, -0.43_68, 0.21_91] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 2_81 )
@require_torch
class FocalNetBackboneTest( BackboneTesterMixin , unittest.TestCase ):
    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig
    has_attentions = False

    def setUp(self ):
        self.model_tester = FocalNetModelTester(self )
| 173
|
"""simple docstring"""
MOD_ADLER = 65_521


def adler32(plain_text: str ) -> int:
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr )) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
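

# A minimal cross-check sketch: for ASCII text the function should agree with
# zlib.adler32 from the standard library, which implements the same checksum
# with the same modulus.
if __name__ == "__main__":
    import zlib

    sample = "Wikipedia"
    assert adler32(sample ) == zlib.adler32(sample.encode("utf-8" ) )
    print(hex(adler32(sample ) ) )  # 0x11e60398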
| 173
| 1
|
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=20 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = np.concatenate([input_ids, eos_tensor] , axis=1 )
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_pegasus_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict
    def check_use_cache_forward(self , model_class_name , config , inputs_dict ):
        max_decoder_length = 20
        model = model_class_name(config )
        encoder_outputs = model.encode(inputs_dict["input_ids"] )
        decoder_input_ids , decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4" )
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=outputs_cache.past_key_values , decoder_position_ids=decoder_position_ids , )
        outputs = model.decode(decoder_input_ids , encoder_outputs )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
    def check_use_cache_forward_with_attn_mask(self , model_class_name , config , inputs_dict ):
        max_decoder_length = 20
        model = model_class_name(config )
        encoder_outputs = model.encode(inputs_dict["input_ids"] )
        decoder_input_ids , decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        decoder_attention_mask = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
            ] , axis=-1 , )
        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=decoder_attention_mask , decoder_position_ids=decoder_position_ids , )
        outputs = model.decode(decoder_input_ids , encoder_outputs , decoder_attention_mask=decoder_attention_mask )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
def prepare_pegasus_inputs_dict(config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , ):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids , config.pad_token_id ).astype(np.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape , dtype=np.int8 ),
                np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.int8 ),
            ] , axis=-1 , )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
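

# Minimal shape sketch for the helper above (hypothetical ids; pad_token_id assumed 0):
# a padded row yields an attention mask that zeroes the pad positions, while the
# decoder mask always attends to position 0.
if __name__ == "__main__":
    import types

    cfg = types.SimpleNamespace(pad_token_id=0 )
    ids = np.array([[5, 6, 7, 0, 0]] )
    dec = np.array([[2, 5, 6, 0, 0]] )
    feats = prepare_pegasus_inputs_dict(cfg , ids , dec )
    assert feats["attention_mask"].tolist() == [[1, 1, 1, 0, 0]]
    assert feats["decoder_attention_mask"].tolist() == [[1, 1, 1, 0, 0]]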
@require_flax
class FlaxPegasusModelTest( FlaxModelTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self ):
        self.model_tester = FlaxPegasusModelTester(self )
        self.config_tester = ConfigTester(self , config_class=PegasusConfig )

    def test_config(self ):
        self.config_tester.run_common_tests()
    def test_use_cache_forward(self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class , config , inputs_dict )

    def test_use_cache_forward_with_attn_mask(self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class , config , inputs_dict )
    def test_encode(self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )

                @jax.jit
                def encode_jitted(input_ids , attention_mask=None , **kwargs ):
                    return model.encode(input_ids=input_ids , attention_mask=attention_mask )

                with self.subTest("JIT Enabled" ):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict ).to_tuple()

                with self.subTest("JIT Disabled" ):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict ).to_tuple()

                self.assertEqual(len(jitted_outputs ) , len(outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
    def test_decode(self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                model = model_class(config )
                encoder_outputs = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"] )
                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids , decoder_attention_mask , encoder_outputs ):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids , decoder_attention_mask=decoder_attention_mask , encoder_outputs=encoder_outputs , )

                with self.subTest("JIT Enabled" ):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict ).to_tuple()

                with self.subTest("JIT Disabled" ):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict ).to_tuple()

                self.assertEqual(len(jitted_outputs ) , len(outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
@slow
    def test_model_from_pretrained(self ):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large" , from_pt=True )
            input_ids = np.ones((1, 1) )
            outputs = model(input_ids )
            self.assertIsNotNone(outputs )
@slow
    def test_pegasus_xsum_summary(self ):
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum" )
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum" )
        src_text = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
        tgt_text = [
"California's largest electricity provider has turned off power to hundreds of thousands of customers.",
"Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.",
]
        inputs = tokenizer(src_text , return_tensors="np" , truncation=True , max_length=5_12 , padding=True )
        translated_tokens = model.generate(**inputs , num_beams=2 ).sequences
        decoded = tokenizer.batch_decode(translated_tokens , skip_special_tokens=True )
        assert tgt_text == decoded
| 655
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""",
}
class GitVisionConfig( PretrainedConfig ):
    model_type = 'git_vision_model'

    def __init__(self , hidden_size=7_68 , intermediate_size=30_72 , num_hidden_layers=12 , num_attention_heads=12 , num_channels=3 , image_size=2_24 , patch_size=16 , hidden_act="quick_gelu" , layer_norm_eps=1e-5 , attention_dropout=0.0 , initializer_range=0.02 , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
@classmethod
    def from_pretrained(cls , pretrained_model_name_or_path: Union[str, os.PathLike] , **kwargs ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type" ) == "git":
            config_dict = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class GitConfig( PretrainedConfig ):
    model_type = 'git'

    def __init__(self , vision_config=None , vocab_size=3_05_22 , hidden_size=7_68 , num_hidden_layers=6 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=10_24 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , tie_word_embeddings=False , bos_token_id=1_01 , eos_token_id=1_02 , num_image_with_embedding=None , **kwargs , ):
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , pad_token_id=pad_token_id , **kwargs )
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values." )
        self.vision_config = GitVisionConfig(**vision_config )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self ):
        output = copy.deepcopy(self.__dict__ )
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
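

# Minimal usage sketch: round-trip a config with a customized vision tower through
# to_dict(); the attribute names follow the __init__ assignments above.
if __name__ == "__main__":
    vision = GitVisionConfig(image_size=3_84 )
    config = GitConfig(vision_config=vision.to_dict() , num_hidden_layers=6 )
    assert config.to_dict()["vision_config"]["image_size"] == 3_84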
| 655
| 1
|
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
    def setUp(self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
            fp.write('\n'.join(merges ) )
    def get_tokenizer(self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )

    def get_rust_tokenizer(self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts(self , tokenizer ):
        input_text = 'lower newer'
        output_text = 'lower newer'
return input_text, output_text
    def test_full_tokenizer(self ):
        tokenizer = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = 'lower newer'
        bpe_tokens = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
        tokens = tokenizer.tokenize(text )  # , add_prefix_space=True)
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
    def longformer_dict_integration_testing(self ):
        tokenizer = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode('Hello world!' , add_special_tokens=True ) , [0, 3_1414, 232, 328, 2] )
        self.assertListEqual(
            tokenizer.encode('Hello world! cécé herlolip 418' , add_special_tokens=True ) , [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2] , )
@slow
    def test_sequence_builders(self ):
        tokenizer = self.tokenizer_class.from_pretrained('allenai/longformer-base-4096' )
        text = tokenizer.encode('sequence builders' , add_special_tokens=False )
        text_2 = tokenizer.encode('multi-sequence build' , add_special_tokens=False )
        encoded_text_from_decode = tokenizer.encode(
            'sequence builders' , add_special_tokens=True , add_prefix_space=False )
        encoded_pair_from_decode = tokenizer.encode(
            'sequence builders' , 'multi-sequence build' , add_special_tokens=True , add_prefix_space=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_2 )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding(self ):
        tokenizer = self.get_tokenizer()
        sequence = 'Encode this sequence.'
        space_encoding = tokenizer.byte_encoder[' '.encode('utf-8' )[0]]
        # Testing encoder arguments
        encoded = tokenizer.encode(sequence , add_special_tokens=False , add_prefix_space=False )
        first_char = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertNotEqual(first_char , space_encoding )
        encoded = tokenizer.encode(sequence , add_special_tokens=False , add_prefix_space=True )
        first_char = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertEqual(first_char , space_encoding )
        tokenizer.add_special_tokens({'bos_token': '<s>'} )
        encoded = tokenizer.encode(sequence , add_special_tokens=True )
        first_char = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
        self.assertNotEqual(first_char , space_encoding )
        # Testing spaces after special tokens
        mask = '<mask>'
        tokenizer.add_special_tokens(
            {'mask_token': AddedToken(mask , lstrip=True , rstrip=False )} )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask )
        sequence = 'Encode <mask> sequence'
        sequence_nospace = 'Encode <mask>sequence'
        encoded = tokenizer.encode(sequence )
        mask_loc = encoded.index(mask_ind )
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertEqual(first_char , space_encoding )
        encoded = tokenizer.encode(sequence_nospace )
        mask_loc = encoded.index(mask_ind )
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertNotEqual(first_char , space_encoding )
    def test_pretokenized_inputs(self ):
        pass
    def test_embeded_special_tokens(self ):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                sentence = 'A, <mask> AllenNLP sentence.'
                tokens_r = tokenizer_r.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True )
                tokens_p = tokenizer_p.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
                self.assertSequenceEqual(
                    tokens_p_str , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
                self.assertSequenceEqual(
                    tokens_r_str , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
    def test_change_add_prefix_space_and_trim_offsets_args(self ):
        for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname , use_fast=True , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets )
            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
            self.assertEqual(pre_tokenizer_state['add_prefix_space'] , add_prefix_space )
            self.assertEqual(post_processor_state['add_prefix_space'] , add_prefix_space )
            self.assertEqual(post_processor_state['trim_offsets'] , trim_offsets )
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self ):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                text_of_1_token = 'hello'  # `hello` is a token in the vocabulary of `pretrained_name`
                text = F'''{text_of_1_token} {text_of_1_token}'''
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=True , trim_offsets=True )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ) + 1, len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=True )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ) + 1, len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=True , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ), len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ), len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                text = F''' {text}'''
                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=True )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token ) + 1, 1 + len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=True , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token ), 1 + len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token ), 1 + len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
| 710
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""sayakpaul/vit-msn-base""": """https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json""",
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig( PretrainedConfig ):
    model_type = '''vit_msn'''

    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-06 , image_size=224 , patch_size=16 , num_channels=3 , qkv_bias=True , **kwargs , ) -> None:
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
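

# Minimal usage sketch: override a couple of defaults of the config defined above.
if __name__ == "__main__":
    config = ViTMSNConfig(image_size=384 , patch_size=32 )
    assert (config.image_size // config.patch_size) ** 2 == 144  # patches per image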
| 541
| 0
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'junnyu/roformer_chinese_small': 1_536,
'junnyu/roformer_chinese_base': 1_536,
'junnyu/roformer_chinese_char_small': 512,
'junnyu/roformer_chinese_char_base': 512,
'junnyu/roformer_small_discriminator': 128,
'junnyu/roformer_small_generator': 128,
}
PRETRAINED_INIT_CONFIGURATION = {
'junnyu/roformer_chinese_small': {'do_lower_case': True},
'junnyu/roformer_chinese_base': {'do_lower_case': True},
'junnyu/roformer_chinese_char_small': {'do_lower_case': True},
'junnyu/roformer_chinese_char_base': {'do_lower_case': True},
'junnyu/roformer_small_discriminator': {'do_lower_case': True},
'junnyu/roformer_small_generator': {'do_lower_case': True},
}
class RoFormerTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            pre_tok_state.get("""lowercase""" , do_lower_case ) != do_lower_case
            or pre_tok_state.get("""strip_accents""" , strip_accents ) != strip_accents
        ):
            pre_tok_class = getattr(normalizers , pre_tok_state.pop("""type""" ) )
            pre_tok_state["lowercase"] = do_lower_case
            pre_tok_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state )
        self.do_lower_case = do_lower_case
    def __getstate__(self ):
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self , d ):
        self.__dict__ = d
        vocab = self.__dict__["""_tokenizer"""].get_vocab()
        self.__dict__["""_tokenizer"""].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab ) )

    def build_inputs_with_special_tokens(self , token_ids_0 , token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]

    def save_vocabulary(self , save_directory , filename_prefix = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )

    def save_pretrained(self , save_directory , legacy_format=None , filename_prefix=None , push_to_hub=False , **kwargs , ):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory , legacy_format , filename_prefix , push_to_hub , **kwargs )
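

# Minimal usage sketch (downloads a checkpoint, so network access is assumed; the
# checkpoint name comes from the pretrained map above):
#     tokenizer = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base" )
#     ids = tokenizer("今天天气非常好。" )["input_ids"]
#     print(tokenizer.convert_ids_to_tokens(ids ) )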
| 585
|
'''simple docstring'''
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('1.6'):
    _is_native_amp_available = True
from torch.cuda.amp import autocast
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    cache_dir: Optional[str] = field(
        default=None , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
    freeze_feature_extractor: Optional[bool] = field(
        default=True , metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
    verbose_logging: Optional[bool] = field(
        default=False , metadata={"help": "Whether to log verbose messages or not."} , )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0 , metadata={"help": "Maximum temperature for gumbel softmax."} )
    min_gumbel_temperature: Optional[float] = field(
        default=0.5 , metadata={"help": "Minimum temperature for gumbel softmax."} )
    gumbel_temperature_decay: Optional[float] = field(
        default=0.999995 , metadata={"help": "Decay of gumbel temperature during training."} )
def configure_logger(model_args , training_args ):
    logging.basicConfig(
        format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank ):
        logging_level = logging.INFO
    logger.setLevel(logging_level )
@dataclass
class DataTrainingArguments:
    dataset_name: str = field(
        default=None , metadata={"help": "The name of the dataset to use (via the datasets library)."} )
    dataset_config_name: Optional[str] = field(
        default=None , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
    train_split_name: Optional[str] = field(
        default="train" , metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        } , )
    validation_split_name: Optional[str] = field(
        default="validation" , metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        } , )
    speech_file_column: Optional[str] = field(
        default="file" , metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"} , )
    overwrite_cache: bool = field(
        default=False , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
    validation_split_percentage: Optional[int] = field(
        default=1 , metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        } , )
    preprocessing_num_workers: Optional[int] = field(
        default=None , metadata={"help": "The number of processes to use for the preprocessing."} , )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0 , metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"} )
@dataclass
class DataCollatorForWav2Vec2Pretraining:
    model: Wav2Vec2ForPreTraining
    feature_extractor: Wav2Vec2FeatureExtractor
    padding: Union[bool, str] = "longest"
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self , features: List[Dict[str, Union[List[int], torch.Tensor]]] ) -> Dict[str, torch.Tensor]:
        # reformat list to dict and set to pytorch format
        batch = self.feature_extractor.pad(
            features , max_length=self.max_length , padding=self.padding , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" , )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["""input_values"""].shape[-1] )
        batch_size = batch["""input_values"""].shape[0]
        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["""attention_mask"""].sum(-1 ) ).to(
                torch.long )
            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length) , dtype=torch.long , device=batch["""input_values"""].device )
            # these two operations makes sure that all values
            # before the output lengths indices are attended to
            attention_mask[(torch.arange(attention_mask.shape[0] , device=attention_mask.device ), output_lengths - 1)] = 1
            attention_mask = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool()
        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length) , self.model.config.mask_time_prob , self.model.config.mask_time_length , attention_mask=attention_mask , min_masks=2 , )
        return batch
class Wav2Vec2PreTrainer( Trainer ):
    def __init__(self , *args , max_gumbel_temp=1 , min_gumbel_temp=0 , gumbel_temp_decay=1.0 , **kwargs ):
        super().__init__(*args , **kwargs )
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay
    def training_step(self , model , inputs ) -> torch.Tensor:
        model.train()
        inputs = self._prepare_inputs(inputs )
        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model , inputs )
        else:
            loss = self.compute_loss(model , inputs )
        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["""mask_time_indices"""]).sum()
            else:
                raise ValueError(f"""{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']""" )
        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps
        if self.use_amp:
            self.scaler.scale(loss ).backward()
        elif self.use_apex:
            with amp.scale_loss(loss , self.optimizer ) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss )
else:
loss.backward()
self.num_update_step += 1
# make sure gumbel softmax temperature is decayed
if self.args.n_gpu > 1 or self.deepspeed:
model.module.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
else:
model.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
return loss.detach()
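

# Gumbel temperature schedule sketch: training_step above decays the temperature as
# max_gumbel_temp * gumbel_temp_decay ** num_update_step, clamped at min_gumbel_temp.
# With the ModelArguments defaults (2.0, 0.5, 0.999995) the 0.5 floor is reached after
# roughly ln(0.25) / ln(0.999995) ≈ 277k update steps.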
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args , training_args )
    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split=f"""{data_args.train_split_name}[:{data_args.validation_split_percentage}%]""" , cache_dir=model_args.cache_dir , )
        datasets["train"] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split=f"""{data_args.train_split_name}[{data_args.validation_split_percentage}%:]""" , cache_dir=model_args.cache_dir , )
    else:
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split="""validation""" , cache_dir=model_args.cache_dir , )
        datasets["train"] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split=f"""{data_args.train_split_name}""" , cache_dir=model_args.cache_dir , )
    # only normalized-inputs-training is supported
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=True )

    def prepare_dataset(batch ):
        # check that all files have the correct sampling rate
        batch["speech"] , _ = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate )
        return batch

    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets["""train"""].column_names )
    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data : len(data["""speech"""] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) )

    def normalize(batch ):
        return feature_extractor(batch["""speech"""] , sampling_rate=feature_extractor.sampling_rate )

    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize , batched=True , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets["""train"""].column_names , )
    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = Wav2Vec2Config.from_pretrained(
        model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , )
    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            """PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"""
            """ ``config.feat_extract_norm='layer'""" )
    model = Wav2Vec2ForPreTraining(config )
    data_collator = DataCollatorForWav2Vec2Pretraining(model=model , feature_extractor=feature_extractor )
    trainer = Wav2Vec2PreTrainer(
        model=model , data_collator=data_collator , args=training_args , train_dataset=vectorized_datasets["""train"""] , eval_dataset=vectorized_datasets["""validation"""] , tokenizer=feature_extractor , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , )
trainer.train()
if __name__ == "__main__":
main()
| 585
| 1
|
"""simple docstring"""
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
torch_version = parse(importlib.metadata.version("torch"))


def compare_versions(library_or_version: Union[str, Version] , operation: str , requirement_version: str ) -> bool:
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(F'''`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}''' )
    operation_func = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version , str ):
        library_or_version = parse(importlib.metadata.version(library_or_version ) )
    return operation_func(library_or_version , parse(requirement_version ) )


def is_torch_version(operation: str , version: str ) -> bool:
    return compare_versions(torch_version , operation , version )
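

# Minimal usage sketch (assumes STR_OPERATION_TO_FUNC maps comparison strings such as
# ">=" to functions like operator.ge, as the import above suggests):
#     is_torch_version(">=", "1.6.0")              -> True iff the installed torch is >= 1.6.0
#     compare_versions("packaging", ">=", "20.0")  -> checks an arbitrary installed library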
| 708
|
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]


def try_key(key: tuple[int, ...] , ciphertext: list[int] ) -> str | None:
    decoded = ""
    for keychar, cipherchar in zip(cycle(key ) , ciphertext ):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar )
    return decoded


def filter_valid_chars(ciphertext: list[int] ) -> list[str]:
    possibles: list[str] = []
    for key in product(LOWERCASE_INTS , repeat=3 ):
        encoded = try_key(key , ciphertext )
        if encoded is not None:
            possibles.append(encoded )
    return possibles


def filter_common_word(possibles: list[str] , common_word: str ) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt" ) -> int:
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str
    data: str = Path(__file__).parent.joinpath(filename ).read_text(encoding="utf-8" )
    ciphertext = [int(number ) for number in data.strip().split("," )]
    possibles = filter_valid_chars(ciphertext )
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles , common_word )
        if len(possibles ) == 1:
            break
    decoded_text = possibles[0]
    return sum(ord(char ) for char in decoded_text )
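

# A minimal round-trip sketch (hypothetical key and message, not the puzzle input):
# XOR-encrypting with a cycled lowercase key and decoding with try_key recovers the
# plaintext, which is exactly the inversion the brute-force search above relies on.
def _demo_try_key_roundtrip() -> None:
    key = tuple(map(ord , "abc" ) )  # hypothetical 3-letter key
    cipher = [char ^ keychar for char, keychar in zip(map(ord , "the text" ) , cycle(key ) )]
    assert try_key(key , cipher ) == "the text"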
if __name__ == "__main__":
print(f"""{solution() = }""")