| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82–53.2k | int64 0–721 | stringlengths 91–41.9k | int64 0–699 | int64 0–1 |
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field

import torch

from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging

logger = logging.get_logger(__name__)


def is_sagemaker_model_parallel_available():
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_mpi_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None


if is_sagemaker_model_parallel_available():
    import smdistributed.modelparallel.torch as smp

    smp.init()


@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    mp_parameters: str = field(
        default="",
        metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"},
    )

    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
            "`TrainingArguments` instead.",
            FutureWarning,
        )

    @cached_property
    def _setup_devices(self) -> "torch.device":
        logger.info("PyTorch: setting up devices")
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                "torch.distributed process group is initialized, but local_rank == -1. "
                "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch"
            )
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("cuda", local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1

        if device.type == "cuda":
            torch.cuda.set_device(device)

        return device

    @property
    def world_size(self):
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()

        return super().world_size

    @property
    def place_model_on_device(self):
        return not is_sagemaker_model_parallel_available()

    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
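
# A minimal usage sketch (hedged): outside of SageMaker these arguments behave
# like plain TrainingArguments; "./out" is just an illustrative output directory.
if __name__ == "__main__":
    args = SageMakerTrainingArguments(output_dir="./out")
    print(args.place_model_on_device)  # True unless SageMaker model parallelism is available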
| 84 |
import random

from .binary_exp_mod import bin_exp_mod


def is_prime_big(n: int, prec: int = 1000) -> bool:
    """Miller-Rabin probabilistic primality test with `prec` random rounds."""
    if n < 2:
        return False

    if n % 2 == 0:
        return n == 2

    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2  # integer division keeps d an int for the modular exponentiation below
        exp += 1

    # n - 1 = d * (2**exp)
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True


if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
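
# `bin_exp_mod` is imported from a sibling module that is not shown here. It is
# assumed to compute (base**exponent) % modulus by binary exponentiation; a
# minimal stand-in with the same signature, equivalent to Python's built-in
# three-argument pow(base, exponent, modulus):
def _bin_exp_mod_reference(base: int, exponent: int, modulus: int) -> int:
    # square-and-multiply modular exponentiation
    result = 1
    base %= modulus
    while exponent > 0:
        if exponent % 2 == 1:
            result = result * base % modulus
        base = base * base % modulus
        exponent //= 2
    return result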
| 427 | 0 |
import enum
import shutil
import sys

TERMINAL_WIDTH, _ = shutil.get_terminal_size()
CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}


class Direction(enum.Enum):
    UP = 0
    DOWN = 1


def forceWrite(content, end=""):
    """Write `content` to stdout and flush immediately."""
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()


def writeColor(content, color, end=""):
    """Write `content` wrapped in the given ANSI color code."""
    forceWrite(f"\u001b[{color}m{content}\u001b[0m", end)


def reset_cursor():
    """Return the cursor to the start of the current line."""
    forceWrite("\r")


def move_cursor(num_lines: int, direction: str):
    """Move the cursor `num_lines` in the given direction (UP/DOWN/RIGHT/LEFT)."""
    forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}")


def clear_line():
    """Blank out the current line and reset the cursor."""
    forceWrite(" " * TERMINAL_WIDTH)
    reset_cursor()


def linebreak():
    """Draw a horizontal rule across the terminal."""
    reset_cursor()
    forceWrite("-" * TERMINAL_WIDTH)
| 708 |
import csv

import tweepy

# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name: str) -> None:
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)
    # save most recent tweets
    alltweets.extend(new_tweets)
    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)
        # save most recent tweets
        alltweets.extend(new_tweets)
        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(f"...{len(alltweets)} tweets downloaded so far")

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)


if __name__ == "__main__":
    # pass in the username of the account you want to download
    get_all_tweets("FirePing32")
| 344 | 0 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {
    "configuration_graphormer": ["GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "GraphormerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_graphormer"] = [
        "GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GraphormerForGraphClassification",
        "GraphormerModel",
        "GraphormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_graphormer import (
            GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            GraphormerForGraphClassification,
            GraphormerModel,
            GraphormerPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 63 |
import math
from typing import Optional

import numpy as np

from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
    "facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}


class EncodecConfig(PretrainedConfig):
    model_type = "encodec"

    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24_000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1_024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"` or `"time_group_norm"`, got {self.norm_type}'
            )

        super().__init__(**kwargs)

    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        return int(1_000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
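
# A quick check of the derived properties under the defaults above:
# hop_length = 8 * 5 * 4 * 2 = 320, so frame_rate = ceil(24000 / 320) = 75,
# and num_quantizers = int(1000 * 24.0 // (75 * 10)) = 32.
if __name__ == "__main__":
    config = EncodecConfig()
    print(config.frame_rate)  # 75
    print(config.num_quantizers)  # 32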
| 635 | 0 |
import warnings

from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor

logger = logging.get_logger(__name__)


class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 618 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging

if TYPE_CHECKING:
    from ...processing_utils import ProcessorMixin
    from ...utils import TensorType

logger = logging.get_logger(__name__)

LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}


class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_2d_position_embeddings=1024,
        coordinate_size=128,
        shape_size=128,
        has_relative_attention_bias=True,
        rel_pos_bins=32,
        max_rel_pos=128,
        rel_2d_pos_bins=64,
        max_rel_2d_pos=256,
        has_spatial_attention_bias=True,
        text_embed=True,
        visual_embed=True,
        input_size=224,
        num_channels=3,
        patch_size=16,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings,
            type_vocab_size=type_vocab_size,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout


class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # The order of inputs is different for question answering and sequence classification
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ]
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        setattr(processor.image_processor, "apply_ocr", False)

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size

        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)

        inputs = dict(
            processor(
                dummy_image,
                text=dummy_text,
                boxes=dummy_bboxes,
                return_tensors=framework,
            )
        )

        return inputs
| 618 | 1 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available

_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 78 |
import math
import sys


def minimum_squares_to_represent_a_number(number: int) -> int:
    """Count the minimum number of perfect squares that sum to `number`, via dynamic programming."""
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
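    # A quick usage example: 12 = 4 + 4 + 4 and no two perfect squares sum to 12,
    # so the minimum count is 3.
    print(minimum_squares_to_represent_a_number(12))  # 3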
| 500 | 0 |
import argparse
import json
import os
import re
import shutil

import torch

from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging

logging.set_verbosity_warning()

json_indent = 2


class Dictionary:
    """A mapping from symbols to consecutive integers."""

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)

    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices

    @classmethod
    def load(cls, f):
        """Load the dictionary from a text file of `<symbol> <count>` lines."""
        d = cls()
        d.add_from_file(f)
        return d

    def add_symbol(self, word, n=1, overwrite=False):
        """Add a word to the dictionary, accumulating its count."""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def _load_meta(self, lines):
        return 0

    def add_from_file(self, f):
        """Load a pre-existing dictionary from a text file and add its symbols to this instance."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines)

        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")


def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2


def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f"path {biogpt_checkpoint_path} does not exist!")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f"path to the file {checkpoint_file} does not exist!")
    chkpt = torch.load(checkpoint_file, map_location="cpu")

    args = chkpt["cfg"]["model"]

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
    if not os.path.isfile(dict_file):
        raise ValueError(f"path to the file {dict_file} does not exist!")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
    print(f"Generating {src_vocab_file} of {src_vocab_size} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f"path to the file {bpecodes_file} does not exist!")

    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    model_conf = {
        "activation_dropout": args["activation_dropout"],
        "architectures": ["BioGptForCausalLM"],
        "attention_probs_dropout_prob": args["attention_dropout"],
        "bos_token_id": 0,
        "eos_token_id": 2,
        "hidden_act": args["activation_fn"],
        "hidden_dropout_prob": args["dropout"],
        "hidden_size": args["decoder_embed_dim"],
        "initializer_range": 0.02,
        "intermediate_size": args["decoder_ffn_embed_dim"],
        "layer_norm_eps": 1e-12,
        "layerdrop": args["decoder_layerdrop"],
        "max_position_embeddings": args["max_target_positions"],
        "model_type": "biogpt",
        "num_attention_heads": args["decoder_attention_heads"],
        "num_hidden_layers": args["decoder_layers"],
        "pad_token_id": 1,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_decoder_input_output_embed"],
        "vocab_size": src_vocab_size,
    }

    # good hparam defaults to start with
    print(f"Generating {biogpt_model_config_file}")
    with open(biogpt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        "bos_token": "<s>",
        "eos_token": "</s>",
        "model_max_length": 1024,
        "pad_token": "<pad>",
        "special_tokens_map_file": None,
        "tokenizer_class": "BioGptTokenizer",
        "unk_token": "<unk>",
    }

    print(f"Generating {biogpt_tokenizer_config_file}")
    with open(biogpt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model_state_dict = chkpt["model"]

    # remove unneeded keys
    ignore_keys = [
        "decoder.version",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith("output_projection.weight"):
            model_state_dict[layer_name.replace("decoder.", "")] = model_state_dict.pop(layer_name)
        else:
            model_state_dict[layer_name.replace("decoder", "biogpt")] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--biogpt_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help=(
            "Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
            " bpecodes, etc."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
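
# Illustrative invocation of this script (hedged; both paths are hypothetical):
#   python <this_script>.py --biogpt_checkpoint_path ./biogpt_fairseq_dump \
#       --pytorch_dump_folder_path ./biogpt_hf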
| 420 |
from __future__ import annotations

import numpy as np
from numpy import float64
from numpy.typing import NDArray


def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[int],
    iterations: int,
) -> list[float]:
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate((coefficient_matrix, constant_matrix), axis=1)

    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant


# Test Cases
if __name__ == "__main__":
    import doctest

    doctest.testmod()
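    # A short usage sketch: a strictly diagonally dominant 3x3 system
    # (each diagonal entry exceeds the sum of the other entries in its row).
    coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
    constant = np.array([[2.0], [-6.0], [-4.0]])
    print(jacobi_iteration_method(coefficient, constant, [0, 0, 0], iterations=3))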
| 420 | 1 |
import torch

from diffusers import UnCLIPScheduler

from .test_schedulers import SchedulerCommonTest


class UnCLIPSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue

                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)

    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)

        predicted_variance = 0.5

        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5

    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3

    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(25)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3

    def test_trained_betas(self):
        pass

    def test_add_noise_device(self):
        pass
| 511 |
"""simple docstring"""
def UpperCAmelCase ( snake_case : int = 100 ):
_lowerCAmelCase:Dict = set()
_lowerCAmelCase:Optional[Any] = 0
_lowerCAmelCase:int = n + 1 # maximum limit
for a in range(2 , snake_case ):
for b in range(2 , snake_case ):
_lowerCAmelCase:str = a**b # calculates the current power
collect_powers.add(snake_case ) # adds the result to the set
return len(snake_case )
if __name__ == "__main__":
print('''Number of terms ''', solution(int(str(input()).strip())))
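    # Worked check from the problem statement: for 2 <= a <= 5 and 2 <= b <= 5
    # there are 15 distinct terms, so solution(5) == 15.
    print(solution(5))  # 15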
| 227 | 0 |
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class __magic_name__ ( unittest.TestCase ):
def _lowerCamelCase ( self , __magic_name__ ):
"""simple docstring"""
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ):
_lowerCAmelCase = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(__magic_name__ )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = 'sshleifer/tiny-gpt2'
_lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , )
_lowerCAmelCase = PyTorchBenchmark(__magic_name__ )
_lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = 'sgugger/tiny-distilbert-classification'
_lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , only_pretrain_model=__magic_name__ , )
_lowerCAmelCase = PyTorchBenchmark(__magic_name__ )
_lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = 'sshleifer/tiny-gpt2'
_lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , torchscript=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , )
_lowerCAmelCase = PyTorchBenchmark(__magic_name__ )
_lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == 'cpu' , 'Cant do half precision' )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = 'sshleifer/tiny-gpt2'
_lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , fpaa=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , )
_lowerCAmelCase = PyTorchBenchmark(__magic_name__ )
_lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = 'sshleifer/tiny-gpt2'
_lowerCAmelCase = AutoConfig.from_pretrained(__magic_name__ )
# set architectures equal to `None`
_lowerCAmelCase = None
_lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , )
_lowerCAmelCase = PyTorchBenchmark(__magic_name__ , configs=[config] )
_lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = 'sshleifer/tiny-gpt2'
_lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , )
_lowerCAmelCase = PyTorchBenchmark(__magic_name__ )
_lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == 'cpu' , 'Can\'t do half precision' )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = 'sshleifer/tiny-gpt2'
_lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , fpaa=__magic_name__ , multi_process=__magic_name__ , )
_lowerCAmelCase = PyTorchBenchmark(__magic_name__ )
_lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = 'sshleifer/tiny-gpt2'
_lowerCAmelCase = AutoConfig.from_pretrained(__magic_name__ )
_lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , )
_lowerCAmelCase = PyTorchBenchmark(__magic_name__ , configs=[config] )
_lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = 'sshleifer/tinier_bart'
_lowerCAmelCase = AutoConfig.from_pretrained(__magic_name__ )
_lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , )
_lowerCAmelCase = PyTorchBenchmark(__magic_name__ , configs=[config] )
_lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = 'sshleifer/tiny-gpt2'
_lowerCAmelCase = AutoConfig.from_pretrained(__magic_name__ )
_lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , )
_lowerCAmelCase = PyTorchBenchmark(__magic_name__ , configs=[config] )
_lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = 'sshleifer/tinier_bart'
_lowerCAmelCase = AutoConfig.from_pretrained(__magic_name__ )
_lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , )
_lowerCAmelCase = PyTorchBenchmark(__magic_name__ , configs=[config] )
_lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , save_to_csv=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__magic_name__ , 'inf_time.csv' ) , train_memory_csv_file=os.path.join(__magic_name__ , 'train_mem.csv' ) , inference_memory_csv_file=os.path.join(__magic_name__ , 'inf_mem.csv' ) , train_time_csv_file=os.path.join(__magic_name__ , 'train_time.csv' ) , env_info_csv_file=os.path.join(__magic_name__ , 'env.csv' ) , multi_process=__magic_name__ , )
_lowerCAmelCase = PyTorchBenchmark(__magic_name__ )
benchmark.run()
self.assertTrue(Path(os.path.join(__magic_name__ , 'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__magic_name__ , 'train_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__magic_name__ , 'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__magic_name__ , 'train_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__magic_name__ , 'env.csv' ) ).exists() )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(__magic_name__ ):
self.assertTrue(hasattr(__magic_name__ , 'sequential' ) )
self.assertTrue(hasattr(__magic_name__ , 'cumulative' ) )
self.assertTrue(hasattr(__magic_name__ , 'current' ) )
self.assertTrue(hasattr(__magic_name__ , 'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__magic_name__ , 'log.txt' ) , log_print=__magic_name__ , trace_memory_line_by_line=__magic_name__ , multi_process=__magic_name__ , )
_lowerCAmelCase = PyTorchBenchmark(__magic_name__ )
_lowerCAmelCase = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(__magic_name__ , 'log.txt' ) ).exists() )
| 702 |
"""simple docstring"""
from __future__ import annotations
import numpy as np
def A__ ( __lowerCamelCase ):
"""simple docstring"""
return np.maximum(0, __lowerCamelCase )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 309 | 0 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)

_import_structure = {
    "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
    _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilevit"] = [
        "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileViTForImageClassification",
        "MobileViTForSemanticSegmentation",
        "MobileViTModel",
        "MobileViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
        "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileViTForImageClassification",
        "TFMobileViTForSemanticSegmentation",
        "TFMobileViTModel",
        "TFMobileViTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilevit import MobileViTFeatureExtractor
        from .image_processing_mobilevit import MobileViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilevit import (
            MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileViTForImageClassification,
            MobileViTForSemanticSegmentation,
            MobileViTModel,
            MobileViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mobilevit import (
            TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFMobileViTForImageClassification,
            TFMobileViTForSemanticSegmentation,
            TFMobileViTModel,
            TFMobileViTPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 291 |
def solution(max_perimeter: int = 10**9) -> int:
    """Accumulate the perimeters generated by the recurrence below until max_perimeter is exceeded."""
    prev_value = 1
    value = 2

    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter

        prev_value += 2 * value
        value += prev_value

        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1

    return perimeters_sum


if __name__ == "__main__":
    print(f"{solution() = }")
| 557 | 0 |
"""simple docstring"""
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
snake_case_ = os.path.abspath(_SCREAMING_SNAKE_CASE )
logger.info(f"""Converting TensorFlow checkpoint from {tf_path}""" )
# Load weights from TF model
snake_case_ = tf.train.list_variables(_SCREAMING_SNAKE_CASE )
snake_case_ = []
snake_case_ = []
snake_case_ = []
for full_name, shape in init_vars:
# logger.info(f"Loading TF weight {name} with shape {shape}")
snake_case_ = full_name.split("""/""" )
if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
logger.info(f"""Skipping non-model layer {full_name}""" )
continue
if "optimizer" in full_name:
logger.info(f"""Skipping optimization layer {full_name}""" )
continue
if name[0] == "model":
# ignore initial 'model'
snake_case_ = name[1:]
# figure out how many levels deep the name is
snake_case_ = 0
for _name in name:
if _name.startswith("""layer_with_weights""" ):
depth += 1
else:
break
layer_depth.append(_SCREAMING_SNAKE_CASE )
# read data
snake_case_ = tf.train.load_variable(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
names.append("""/""".join(_SCREAMING_SNAKE_CASE ) )
arrays.append(_SCREAMING_SNAKE_CASE )
logger.info(f"""Read a total of {len(_SCREAMING_SNAKE_CASE ):,} layers""" )
# Sanity check
if len(set(_SCREAMING_SNAKE_CASE ) ) != 1:
raise ValueError(f"""Found layer names with different depths (layer depth {list(set(_SCREAMING_SNAKE_CASE ) )})""" )
snake_case_ = list(set(_SCREAMING_SNAKE_CASE ) )[0]
if layer_depth != 1:
raise ValueError(
"""The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"""
""" heads.""" )
# convert layers
logger.info("""Converting weights...""" )
for full_name, array in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
snake_case_ = full_name.split("""/""" )
snake_case_ = model
snake_case_ = []
for i, m_name in enumerate(_SCREAMING_SNAKE_CASE ):
if m_name == ".ATTRIBUTES":
# variable names end with .ATTRIBUTES/VARIABLE_VALUE
break
if m_name.startswith("""layer_with_weights""" ):
snake_case_ = int(m_name.split("""-""" )[-1] )
if layer_num <= 2:
# embedding layers
# layer_num 0: word_embeddings
# layer_num 1: position_embeddings
# layer_num 2: token_type_embeddings
continue
elif layer_num == 3:
# embedding LayerNorm
trace.extend(["""embeddings""", """LayerNorm"""] )
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , """embeddings""" )
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , """LayerNorm""" )
elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
# encoder layers
trace.extend(["""encoder""", """layer""", str(layer_num - 4 )] )
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , """encoder""" )
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , """layer""" )
snake_case_ = pointer[layer_num - 4]
elif layer_num == config.num_hidden_layers + 4:
# pooler layer
trace.extend(["""pooler""", """dense"""] )
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , """pooler""" )
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , """dense""" )
elif m_name == "embeddings":
trace.append("""embeddings""" )
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , """embeddings""" )
if layer_num == 0:
trace.append("""word_embeddings""" )
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , """word_embeddings""" )
elif layer_num == 1:
trace.append("""position_embeddings""" )
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , """position_embeddings""" )
elif layer_num == 2:
trace.append("""token_type_embeddings""" )
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , """token_type_embeddings""" )
else:
raise ValueError(f"""Unknown embedding layer with name {full_name}""" )
trace.append("""weight""" )
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , """weight""" )
elif m_name == "_attention_layer":
# self-attention layer
trace.extend(["""attention""", """self"""] )
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , """attention""" )
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , """self""" )
elif m_name == "_attention_layer_norm":
# output attention norm
trace.extend(["""attention""", """output""", """LayerNorm"""] )
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , """attention""" )
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , """output""" )
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , """LayerNorm""" )
elif m_name == "_attention_output_dense":
# output attention dense
trace.extend(["""attention""", """output""", """dense"""] )
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , """attention""" )
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , """output""" )
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , """dense""" )
elif m_name == "_output_dense":
# output dense
trace.extend(["""output""", """dense"""] )
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , """output""" )
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , """dense""" )
elif m_name == "_output_layer_norm":
# output dense
trace.extend(["""output""", """LayerNorm"""] )
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , """output""" )
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , """LayerNorm""" )
elif m_name == "_key_dense":
# attention key
trace.append("""key""" )
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , """key""" )
elif m_name == "_query_dense":
# attention query
trace.append("""query""" )
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , """query""" )
elif m_name == "_value_dense":
# attention value
trace.append("""value""" )
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , """value""" )
elif m_name == "_intermediate_dense":
# attention intermediate dense
trace.extend(["""intermediate""", """dense"""] )
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , """intermediate""" )
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , """dense""" )
elif m_name == "_output_layer_norm":
# output layer norm
trace.append("""output""" )
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , """output""" )
# weights & biases
elif m_name in ["bias", "beta"]:
trace.append("""bias""" )
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , """bias""" )
elif m_name in ["kernel", "gamma"]:
trace.append("""weight""" )
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , """weight""" )
else:
logger.warning(f"""Ignored {m_name}""" )
# for certain layers reshape is necessary
snake_case_ = """.""".join(_SCREAMING_SNAKE_CASE )
if re.match(r"""(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)""" , _SCREAMING_SNAKE_CASE ) or re.match(
r"""(\S+)\.attention\.output\.dense\.weight""" , _SCREAMING_SNAKE_CASE ):
snake_case_ = array.reshape(pointer.data.shape )
if "kernel" in full_name:
snake_case_ = array.transpose()
if pointer.shape == array.shape:
snake_case_ = torch.from_numpy(_SCREAMING_SNAKE_CASE )
else:
raise ValueError(
f"""Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"""
f""" {array.shape}""" )
logger.info(f"""Successfully set variable {full_name} to PyTorch layer {trace}""" )
return model
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
# Instantiate model
logger.info(f"""Loading model based on config from {config_path}...""" )
snake_case_ = BertConfig.from_json_file(_SCREAMING_SNAKE_CASE )
snake_case_ = BertModel(_SCREAMING_SNAKE_CASE )
# Load weights from checkpoint
logger.info(f"""Loading weights from checkpoint {tf_checkpoint_path}...""" )
load_tfa_weights_in_bert(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Save pytorch-model
logger.info(f"""Saving PyTorch model to {pytorch_dump_path}...""" )
torch.save(model.state_dict() , _SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
'--tf_checkpoint_path', type=str, required=True, help='Path to the TensorFlow 2.x checkpoint path.'
)
parser.add_argument(
'--bert_config_file',
type=str,
required=True,
help='The config json file corresponding to the BERT model. This specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path',
type=str,
required=True,
help='Path to the output PyTorch model (must include filename).',
)
__SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 700 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=snake_case__)
class __A (snake_case__):
'''simple docstring'''
__lowercase: str = field(default="""automatic-speech-recognition""" , metadata={"""include_in_asdict_even_if_is_default""": True})
__lowercase: ClassVar[Features] = Features({"""audio""": Audio()})
__lowercase: ClassVar[Features] = Features({"""transcription""": Value("""string""")})
__lowercase: str = "audio"
__lowercase: str = "transcription"
def lowerCAmelCase ( self : List[Any] , UpperCAmelCase_ : Any ) ->int:
"""simple docstring"""
if self.audio_column not in features:
raise ValueError(F"""Column {self.audio_column} is not present in features.""" )
if not isinstance(features[self.audio_column] , UpperCAmelCase_ ):
raise ValueError(F"""Column {self.audio_column} is not an Audio type.""" )
snake_case_ = copy.deepcopy(self )
snake_case_ = self.input_schema.copy()
snake_case_ = features[self.audio_column]
snake_case_ = input_schema
return task_template
@property
def lowerCAmelCase ( self : List[str] ) ->Dict[str, str]:
"""simple docstring"""
return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 2 | 0 |
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def test_patch_submodule():
    import os as original_os
    from os import path as original_path
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join

    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join

    mock = "__test_patch_submodule_mock__"
    with patch_submodule(_test_patching, "os.path.join", mock):
        # Every way to access os.path.join must be patched, and the rest must stay untouched

        # check os.path.join
        assert isinstance(_test_patching.os, _PatchedModuleObj)
        assert isinstance(_test_patching.os.path, _PatchedModuleObj)
        assert _test_patching.os.path.join is mock

        # check path.join
        assert isinstance(_test_patching.path, _PatchedModuleObj)
        assert _test_patching.path.join is mock

        # check join
        assert _test_patching.join is mock

        # check that the other attributes are untouched
        assert _test_patching.os.rename is original_rename
        assert _test_patching.path.dirname is original_dirname
        assert _test_patching.os.path.dirname is original_dirname

        # Even renamed modules or objects must be patched

        # check renamed_os.path.join
        assert isinstance(_test_patching.renamed_os, _PatchedModuleObj)
        assert isinstance(_test_patching.renamed_os.path, _PatchedModuleObj)
        assert _test_patching.renamed_os.path.join is mock

        # check renamed_path.join
        assert isinstance(_test_patching.renamed_path, _PatchedModuleObj)
        assert _test_patching.renamed_path.join is mock

        # check renamed_join
        assert _test_patching.renamed_join is mock

        # check that the other attributes are untouched
        assert _test_patching.renamed_os.rename is original_rename
        assert _test_patching.renamed_path.dirname is original_dirname
        assert _test_patching.renamed_os.path.dirname is original_dirname

    # check that everything is back to normal when the patch is over
    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join

    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join
def test_patch_submodule_builtin():
    assert _test_patching.open is open

    mock = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
        assert _test_patching.open is mock

    # check that everything is back to normal when the patch is over
    assert _test_patching.open is open
def test_patch_submodule_missing():
    # pandas.read_csv is not present in _test_patching
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching, "pandas.read_csv", mock):
        pass
def test_patch_submodule_missing_builtin():
    # builtins should always be mocked even if they're not in the globals,
    # in case they're loaded at one point
    mock = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, "len", None) is None
    with patch_submodule(_test_patching, "len", mock):
        assert _test_patching.len is mock
    assert _test_patching.len is len
def test_patch_submodule_start_and_stop():
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching, "open", mock)
    assert _test_patching.open is open
    patch.start()
    assert _test_patching.open is mock
    patch.stop()
    assert _test_patching.open is open
def test_patch_submodule_successive():
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    mock_join = "__test_patch_submodule_successive_join__"
    mock_dirname = "__test_patch_submodule_successive_dirname__"
    mock_rename = "__test_patch_submodule_successive_rename__"
    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename

    with patch_submodule(_test_patching, "os.path.join", mock_join):
        with patch_submodule(_test_patching, "os.rename", mock_rename):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    # try another order
    with patch_submodule(_test_patching, "os.rename", mock_rename):
        with patch_submodule(_test_patching, "os.path.join", mock_join):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename
def test_patch_submodule_doesnt_exist():
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
        pass
    with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
        pass
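# Illustration of the behaviour these tests pin down (not part of the suite):
# patch_submodule temporarily rebinds one dotted attribute as seen from a given
# module, then restores the original binding when the context exits.
#
# fake_join = "<fake os.path.join>"
# with patch_submodule(_test_patching, "os.path.join", fake_join):
#     assert _test_patching.join is fake_join   # patched everywhere it is reachable
# assert _test_patching.join is not fake_join   # and fully restored afterwards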
g = 9.80665  # standard acceleration due to gravity, in m/s^2


def archimedes_principle(fluid_density: float, volume: float, gravity: float = g) -> float:
    """
    Calculate the buoyant force (in newtons) on an object submerged in a fluid:
    force = fluid_density * gravity * volume.

    >>> archimedes_principle(fluid_density=500, volume=2, gravity=10)
    10000
    """
    if fluid_density <= 0:
        raise ValueError("Impossible fluid density")
    if volume < 0:
        raise ValueError("Impossible Object volume")
    if gravity <= 0:
        raise ValueError("Impossible Gravity")
    return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
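# Worked example (illustrative values): an object of volume 0.004 m^3 fully
# submerged in fresh water (density ~1000 kg/m^3) under standard gravity feels
# a buoyant force of 1000 * 9.80665 * 0.004 ≈ 39.23 N:
#
# print(archimedes_principle(fluid_density=1000, volume=0.004))  # ≈ 39.2266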
import requests
APPID = ""  # <-- Put your OpenWeatherMap appid here!
URL_BASE = "https://api.openweathermap.org/data/2.5/"


def current_weather(q: str = "Chicago", appid: str = APPID) -> dict:
    """Current conditions from the `weather` endpoint for a location query."""
    return requests.get(URL_BASE + "weather", params=locals()).json()


def weather_forecast(q: str = "Kolkata, India", appid: str = APPID) -> dict:
    """Forecast from the `forecast` endpoint for a location query."""
    return requests.get(URL_BASE + "forecast", params=locals()).json()


def weather_onecall(lat: float = 55.68, lon: float = 12.57, appid: str = APPID) -> dict:
    """Weather for a latitude/longitude pair from the `onecall` endpoint."""
    return requests.get(URL_BASE + "onecall", params=locals()).json()


if __name__ == "__main__":
    from pprint import pprint

    while True:
        location = input("Enter a location: ").strip()
        if location:
            pprint(current_weather(location))
        else:
            break
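# Design note: `params=locals()` works because, at the very top of each function,
# locals() is exactly the mapping of query parameters the endpoint expects
# (e.g. {"q": "Chicago", "appid": "..."}). A more explicit equivalent would be:
#
# def current_weather_explicit(q: str = "Chicago", appid: str = APPID) -> dict:
#     return requests.get(URL_BASE + "weather", params={"q": q, "appid": appid}).json()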
# Lint as: python3
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")

_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")

_split_re = r"^\w+(\.\w+)*$"

INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"
def camelcase_to_snakecase(name):
    """Convert camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    """Convert snake-case string to camel-case."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")


def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
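if __name__ == "__main__":
    # Quick illustration of the helpers above (dataset and split names are hypothetical):
    print(camelcase_to_snakecase("SomeDatasetName"))  # some_dataset_name
    print(filename_prefix_for_split("SomeDatasetName", "train"))  # some_dataset_name-train
    print(filenames_for_dataset_split("/data", "SomeDatasetName", "train", "arrow", shard_lengths=[100, 100]))
    # ['/data/some_dataset_name-train-00000-of-00002.arrow', '/data/some_dataset_name-train-00001-of-00002.arrow']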
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
    }
}


class CpmTokenizer(PreTrainedTokenizer):
    """Runs pre-tokenization with the Jieba segmentation tool. It is used in CPM models."""

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs=None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation."
            )
        self.jieba = jieba
        self.translator = str.maketrans(" \n", "\u2582\u2583")
    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs
    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def get_special_tokens_mask(
        self, token_ids_0, token_ids_1=None, already_has_special_tokens=False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def _decode(self, *args, **kwargs):
        text = super()._decode(*args, **kwargs)
        text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
        return text
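# Usage sketch (downloads the CPM vocabulary from the Hub on first use):
#
# tokenizer = CpmTokenizer.from_pretrained("TsinghuaAI/CPM-Generate")
# ids = tokenizer.encode("你好,世界")
# print(tokenizer.decode(ids))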
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_roc_bert": ["ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoCBertConfig"],
    "tokenization_roc_bert": ["RoCBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roc_bert"] = [
"ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoCBertForCausalLM",
"RoCBertForMaskedLM",
"RoCBertForMultipleChoice",
"RoCBertForPreTraining",
"RoCBertForQuestionAnswering",
"RoCBertForSequenceClassification",
"RoCBertForTokenClassification",
"RoCBertLayer",
"RoCBertModel",
"RoCBertPreTrainedModel",
"load_tf_weights_in_roc_bert",
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
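# The `else` branch above swaps this module object for a _LazyModule, so heavy
# submodules (and therefore torch) are only imported on first attribute access.
# A minimal, framework-independent sketch of that idea (names are illustrative,
# not the transformers implementation):
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute back to the submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        module_name = self._attr_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(f"{self.__name__}.{module_name}")
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value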
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class PipelineDataset(Dataset):
    def __init__(self, dataset, process, params):
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed


class PipelineIterator(IterableDataset):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size

        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self
    def loader_batch_item(self):
        """Return the item located at `_loader_batch_index` within the current `_loader_batch_data`."""
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is a simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # like batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result
    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()

        # We're out of items within a batch
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed
class PipelineChunkIterator(PipelineIterator):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            # subiterator None means we haven't started unrolling the first item yet
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed
class PipelinePackIterator(PipelineIterator):
    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def __next__(self):
        # Extremely similar to PipelineIterator in its unpacking mechanism
        # BUT, we have an extra required item which is the presence of `is_last`
        # That is because everything is flattened by `PipelineChunkIterator` we
        # need to keep track of how to regroup here in the original `process`
        # boundaries so that `process` and `postprocess` see the same data.

        # This iterator accumulates items (possibly while unbatching) until it
        # hits an `is_last` and then just passes it on to the caller.
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("is_last")
                accumulator.append(item)
                if is_last:
                    return accumulator

        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("is_last")
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("is_last")
                accumulator.append(item)
        return accumulator


class KeyDataset(Dataset):
    def __init__(self, dataset: Dataset, key: str):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]


class KeyPairDataset(Dataset):
    def __init__(self, dataset: Dataset, key1: str, key2: str):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.cross_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.cross_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias"""))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_qcontent_proj.weight""", F"""decoder.layers.{i}.sa_qcontent_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_kcontent_proj.weight""", F"""decoder.layers.{i}.sa_kcontent_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_qpos_proj.weight""", F"""decoder.layers.{i}.sa_qpos_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_kpos_proj.weight""", F"""decoder.layers.{i}.sa_kpos_proj.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.sa_v_proj.weight""", F"""decoder.layers.{i}.sa_v_proj.weight"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_qcontent_proj.weight""", F"""decoder.layers.{i}.ca_qcontent_proj.weight""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_kcontent_proj.weight""", F"""decoder.layers.{i}.ca_kcontent_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_kpos_proj.weight""", F"""decoder.layers.{i}.ca_kpos_proj.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.ca_v_proj.weight""", F"""decoder.layers.{i}.ca_v_proj.weight"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight""", F"""decoder.layers.{i}.ca_qpos_sine_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_qcontent_proj.bias""", F"""decoder.layers.{i}.sa_qcontent_proj.bias""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_kcontent_proj.bias""", F"""decoder.layers.{i}.sa_kcontent_proj.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.sa_qpos_proj.bias""", F"""decoder.layers.{i}.sa_qpos_proj.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.sa_kpos_proj.bias""", F"""decoder.layers.{i}.sa_kpos_proj.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.sa_v_proj.bias""", F"""decoder.layers.{i}.sa_v_proj.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_qcontent_proj.bias""", F"""decoder.layers.{i}.ca_qcontent_proj.bias""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_kcontent_proj.bias""", F"""decoder.layers.{i}.ca_kcontent_proj.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.ca_kpos_proj.bias""", F"""decoder.layers.{i}.ca_kpos_proj.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.ca_v_proj.bias""", F"""decoder.layers.{i}.ca_v_proj.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias""", F"""decoder.layers.{i}.ca_qpos_sine_proj.bias""")
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''),
('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''),
('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''),
('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''),
('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''),
('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''),
('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''),
('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''),
('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''),
('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict


def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)

    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original model's weights to our Conditional DETR structure.
    """

    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)

    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    logger.info(f"Converting model {model_name}...")

    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["conditional_detr.model" + key[len("conditional_detr") :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)

    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''conditional_detr_resnet50''',
type=str,
help='''Name of the CONDITIONAL_DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
    args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
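# Example invocation (script filename and output path are illustrative):
#   python convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.py \
#       --model_name conditional_detr_resnet50 \
#       --pytorch_dump_folder_path ./conditional_detr_resnet50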
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)

    benchmark = TensorFlowBenchmark(args=benchmark_args)
    benchmark.run()
if __name__ == "__main__":
main()
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
    # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
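# Shape sanity check for the dummy past_key_values built above (illustrative numbers):
# with n_layer=28, n_head=16, n_embd=4096, batch=2 and seq_length=8, each of the 28
# (key, value) pairs has shape (2, 16, 8 + 2, 4096 // 16) == (2, 16, 10, 256), and the
# concatenated attention mask has shape (2, 8 + 10) == (2, 18).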
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class MobileBERTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = "google/mobilebert-uncased"
    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        self.tokenizers_list = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])  # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
    def test_is_control(self):
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
    def test_is_punctuation(self):
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])

        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("google/mobilebert-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: int, b: int, c: int):
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c
    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main():
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution_1} and {solution_2}")


if __name__ == "__main__":
    main()
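# Worked example for quadratic_roots (an illustrative note, not part of the
# original script): with a=1, b=-3, c=2 the discriminant is 9 - 8 = 1, so the
# real roots (2.0, 1.0) come back as plain floats; with a=1, b=0, c=1 the
# discriminant is -4 and the complex roots 1j and -1j are returned unchanged.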
'''simple docstring'''
import functools
def edit_distance(word1: str, word2: str) -> int:
    """Compute the Levenshtein distance between two words, top-down with memoization."""
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if the first word's index overflows - delete all remaining of the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if the second word's index overflows - delete all remaining of the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
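# Illustrative check of the memoized recursion above (not part of the original
# module): edit_distance("kitten", "sitting") == 3 -- substitute k->s,
# substitute e->i, and insert the trailing "g". Each call branches on delete,
# insert, or substitute, and functools.cache collapses repeated subproblems so
# the run time is O(len(word1) * len(word2)).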
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
lowercase_ : List[Any] = {
'configuration_speech_to_text': ['SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Speech2TextConfig'],
'processing_speech_to_text': ['Speech2TextProcessor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Optional[Any] = ['Speech2TextTokenizer']
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Any = ['Speech2TextFeatureExtractor']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Optional[int] = [
'TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFSpeech2TextForConditionalGeneration',
'TFSpeech2TextModel',
'TFSpeech2TextPreTrainedModel',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Optional[int] = [
'SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Speech2TextForConditionalGeneration',
'Speech2TextModel',
'Speech2TextPreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speech_to_text import Speech2TextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )
else:
import sys
lowercase_ : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time(t):
    "Format `t` (in seconds) to (h):mm:ss"
    t = int(t)
    h, m, s = t // 3600, (t // 60) % 60, t % 60
    return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}"
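# Quick sanity check for format_time (illustrative, not part of the original
# module): format_time(3661) gives '1:01:01', while format_time(75) gives
# '01:15' because the hour field is dropped when it is zero.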
def html_progress_bar(value, total, prefix, label, width=300):
    "HTML code for a progress bar `value`/`total` with a `prefix` and a `label` on the side."
    return f"""
    <div>
      {prefix}
      <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
      {label}
    </div>
    """
def text_to_html_table(items):
    "Put the texts in `items` in an HTML table."
    html_code = """<table border="1" class="dataframe">\n"""
    html_code += """  <thead>\n <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += f"      <th>{i}</th>\n"
    html_code += "    </tr>\n  </thead>\n  <tbody>\n"
    for line in items[1:]:
        html_code += "    <tr>\n"
        for elt in line:
            elt = f"{elt:.6f}" if isinstance(elt, float) else str(elt)
            html_code += f"      <td>{elt}</td>\n"
        html_code += "    </tr>\n"
    html_code += "  </tbody>\n</table><p>"
    return html_code
class NotebookProgressBar:
    warmup = 5
    update_every = 0.2

    def __init__(self, total, prefix=None, leave=True, parent=None, width=300):
        self.total = total
        self.prefix = "" if prefix is None else prefix
        self.leave = leave
        self.parent = parent
        self.width = width
        self.last_value = None
        self.comment = None
        self.output = None

    def update(self, value, force_update=False, comment=None):
        self.value = value
        if comment is not None:
            self.comment = comment
        if self.last_value is None:
            self.start_time = self.last_time = time.time()
            self.start_value = self.last_value = value
            self.elapsed_time = self.predicted_remaining = None
            self.first_calls = self.warmup
            self.wait_for = 1
            self.update_bar(value)
        elif value <= self.last_value and not force_update:
            return
        elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total):
            if self.first_calls > 0:
                self.first_calls -= 1
            current_time = time.time()
            self.elapsed_time = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
            if value > self.start_value:
                self.average_time_per_item = self.elapsed_time / (value - self.start_value)
            else:
                self.average_time_per_item = None
            if value >= self.total:
                value = self.total
                self.predicted_remaining = None
                if not self.leave:
                    self.close()
            elif self.average_time_per_item is not None:
                self.predicted_remaining = self.average_time_per_item * (self.total - value)
            self.update_bar(value)
            self.last_value = value
            self.last_time = current_time
            if self.average_time_per_item is None:
                self.wait_for = 1
            else:
                self.wait_for = max(int(self.update_every / self.average_time_per_item), 1)

    def update_bar(self, value, comment=None):
        spaced_value = " " * (len(str(self.total)) - len(str(value))) + str(value)
        if self.elapsed_time is None:
            self.label = f"[{spaced_value}/{self.total} : < :"
        elif self.predicted_remaining is None:
            self.label = f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)}"
        else:
            self.label = (
                f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <"
                f" {format_time(self.predicted_remaining)}"
            )
            self.label += f", {1/self.average_time_per_item:.2f} it/s"
        self.label += "]" if self.comment is None or len(self.comment) == 0 else f", {self.comment}]"
        self.display()

    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.parent is not None:
            # If this is a child bar, the parent will take care of the display.
            self.parent.display()
            return
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def close(self):
        if self.parent is None and self.output is not None:
            self.output.update(disp.HTML(""))
class NotebookTrainingTracker(NotebookProgressBar):
    def __init__(self, num_steps, column_names=None):
        super().__init__(num_steps)
        self.inner_table = None if column_names is None else [column_names]
        self.child_bar = None

    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.inner_table is not None:
            self.html_code += text_to_html_table(self.inner_table)
        if self.child_bar is not None:
            self.html_code += self.child_bar.html_code
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def write_line(self, values):
        if self.inner_table is None:
            self.inner_table = [list(values.keys()), list(values.values())]
        else:
            columns = self.inner_table[0]
            if len(self.inner_table) == 1:
                # We give a chance to update the column names at the first iteration
                for key in values.keys():
                    if key not in columns:
                        columns.append(key)
                self.inner_table[0] = columns
            self.inner_table.append([values[c] for c in columns])

    def add_child(self, total, prefix=None, width=300):
        self.child_bar = NotebookProgressBar(total, prefix=prefix, parent=self, width=width)
        return self.child_bar

    def remove_child(self):
        self.child_bar = None
        self.display()
class NotebookProgressCallback(TrainerCallback):
    def __init__(self):
        self.training_tracker = None
        self.prediction_bar = None
        self._force_next_update = False

    def on_train_begin(self, args, state, control, **kwargs):
        self.first_column = "Epoch" if args.evaluation_strategy == IntervalStrategy.EPOCH else "Step"
        self.training_loss = 0
        self.last_log = 0
        column_names = [self.first_column] + ["Training Loss"]
        if args.evaluation_strategy != IntervalStrategy.NO:
            column_names.append("Validation Loss")
        self.training_tracker = NotebookTrainingTracker(state.max_steps, column_names)

    def on_step_end(self, args, state, control, **kwargs):
        epoch = int(state.epoch) if int(state.epoch) == state.epoch else f"{state.epoch:.2f}"
        self.training_tracker.update(
            state.global_step + 1,
            comment=f"Epoch {epoch}/{state.num_train_epochs}",
            force_update=self._force_next_update,
        )
        self._force_next_update = False

    def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs):
        if not has_length(eval_dataloader):
            return
        if self.prediction_bar is None:
            if self.training_tracker is not None:
                self.prediction_bar = self.training_tracker.add_child(len(eval_dataloader))
            else:
                self.prediction_bar = NotebookProgressBar(len(eval_dataloader))
            self.prediction_bar.update(1)
        else:
            self.prediction_bar.update(self.prediction_bar.value + 1)

    def on_predict(self, args, state, control, **kwargs):
        if self.prediction_bar is not None:
            self.prediction_bar.close()
        self.prediction_bar = None

    def on_log(self, args, state, control, logs=None, **kwargs):
        # Only for when there is no evaluation
        if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
            values = {"Training Loss": logs["loss"]}
            # First column is necessarily Step since we're not in epoch eval strategy
            values["Step"] = state.global_step
            self.training_tracker.write_line(values)

    def on_evaluate(self, args, state, control, metrics=None, **kwargs):
        if self.training_tracker is not None:
            values = {"Training Loss": "No log", "Validation Loss": "No log"}
            for log in reversed(state.log_history):
                if "loss" in log:
                    values["Training Loss"] = log["loss"]
                    break
            if self.first_column == "Epoch":
                values["Epoch"] = int(state.epoch)
            else:
                values["Step"] = state.global_step
            metric_key_prefix = "eval"
            for k in metrics:
                if k.endswith("_loss"):
                    metric_key_prefix = re.sub(r"\_loss$", "", k)
            _ = metrics.pop("total_flos", None)
            _ = metrics.pop("epoch", None)
            _ = metrics.pop(f"{metric_key_prefix}_runtime", None)
            _ = metrics.pop(f"{metric_key_prefix}_samples_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_steps_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_jit_compilation_time", None)
            for k, v in metrics.items():
                if k == f"{metric_key_prefix}_loss":
                    values["Validation Loss"] = v
                else:
                    splits = k.split("_")
                    name = " ".join([part.capitalize() for part in splits[1:]])
                    values[name] = v
            self.training_tracker.write_line(values)
            self.training_tracker.remove_child()
            self.prediction_bar = None
            # Evaluation takes a long time so we should force the next update.
            self._force_next_update = True

    def on_train_end(self, args, state, control, **kwargs):
        self.training_tracker.update(
            state.global_step,
            comment=f"Epoch {int(state.epoch)}/{state.num_train_epochs}",
            force_update=True,
        )
        self.training_tracker = None
"""simple docstring"""
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: int, b: int, c: int):
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c
    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main():
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution_1} and {solution_2}")


if __name__ == "__main__":
    main()
'''simple docstring'''
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class lowercase_ ( _lowerCamelCase ):
'''simple docstring'''
def snake_case_ ( self ) -> Any:
"""simple docstring"""
UpperCAmelCase = tempfile.mkdtemp()
UpperCAmelCase = 8
# DPR tok
UpperCAmelCase = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
UpperCAmelCase = os.path.join(self.tmpdirname , 'dpr_tokenizer' )
os.makedirs(lowercase_ , exist_ok=lowercase_ )
UpperCAmelCase = os.path.join(lowercase_ , DPR_VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
# BART tok
UpperCAmelCase = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
UpperCAmelCase = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) )
UpperCAmelCase = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
UpperCAmelCase = {'unk_token': '<unk>'}
UpperCAmelCase = os.path.join(self.tmpdirname , 'bart_tokenizer' )
os.makedirs(lowercase_ , exist_ok=lowercase_ )
UpperCAmelCase = os.path.join(lowercase_ , BART_VOCAB_FILES_NAMES['vocab_file'] )
UpperCAmelCase = os.path.join(lowercase_ , BART_VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(lowercase_ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(lowercase_ ) )
def snake_case_ ( self ) -> Any:
"""simple docstring"""
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'dpr_tokenizer' ) )
def snake_case_ ( self ) -> Dict:
"""simple docstring"""
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'bart_tokenizer' ) )
def snake_case_ ( self ) -> Any:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
@require_tokenizers
def snake_case_ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase = os.path.join(self.tmpdirname , 'rag_tokenizer' )
UpperCAmelCase = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() )
UpperCAmelCase = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() )
rag_config.save_pretrained(lowercase_ )
rag_tokenizer.save_pretrained(lowercase_ )
UpperCAmelCase = RagTokenizer.from_pretrained(lowercase_ , config=lowercase_ )
self.assertIsInstance(new_rag_tokenizer.question_encoder , lowercase_ )
self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() )
self.assertIsInstance(new_rag_tokenizer.generator , lowercase_ )
self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() )
@slow
def snake_case_ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase = RagTokenizer.from_pretrained('facebook/rag-token-nq' )
UpperCAmelCase = [
'who got the first nobel prize in physics',
'when is the next deadpool movie being released',
'which mode is used for short wave broadcast service',
'who is the owner of reading football club',
'when is the next scandal episode coming out',
'when is the last time the philadelphia won the superbowl',
'what is the most current adobe flash player version',
'how many episodes are there in dragon ball z',
'what is the first step in the evolution of the eye',
'where is gall bladder situated in human body',
'what is the main mineral in lithium batteries',
'who is the president of usa right now',
'where do the greasers live in the outsiders',
'panda is a national animal of which country',
'what is the name of manchester united stadium',
]
UpperCAmelCase = tokenizer(lowercase_ )
self.assertIsNotNone(lowercase_ )
@slow
def snake_case_ ( self ) -> int:
"""simple docstring"""
UpperCAmelCase = RagTokenizer.from_pretrained('facebook/rag-sequence-nq' )
UpperCAmelCase = [
'who got the first nobel prize in physics',
'when is the next deadpool movie being released',
'which mode is used for short wave broadcast service',
'who is the owner of reading football club',
'when is the next scandal episode coming out',
'when is the last time the philadelphia won the superbowl',
'what is the most current adobe flash player version',
'how many episodes are there in dragon ball z',
'what is the first step in the evolution of the eye',
'where is gall bladder situated in human body',
'what is the main mineral in lithium batteries',
'who is the president of usa right now',
'where do the greasers live in the outsiders',
'panda is a national animal of which country',
'what is the name of manchester united stadium',
]
UpperCAmelCase = tokenizer(lowercase_ )
self.assertIsNotNone(lowercase_ )
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class a ( _lowerCamelCase ):
def __init__( self : Optional[int] , lowercase_ : NestedDataStructureLike[PathLike] , lowercase_ : Optional[NamedSplit] = None , lowercase_ : Optional[Features] = None , lowercase_ : str = None , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : Optional[str] = None , lowercase_ : Optional[int] = None , **lowercase_ : Optional[Any] , ):
super().__init__(
lowercase_ , split=lowercase_ , features=lowercase_ , cache_dir=lowercase_ , keep_in_memory=lowercase_ , streaming=lowercase_ , num_proc=lowercase_ , **lowercase_ , )
snake_case_ = field
snake_case_ = path_or_paths if isinstance(lowercase_ , lowercase_ ) else {self.split: path_or_paths}
snake_case_ = Json(
cache_dir=lowercase_ , data_files=lowercase_ , features=lowercase_ , field=lowercase_ , **lowercase_ , )
def A_ ( self : Optional[Any] ):
# Build iterable dataset
if self.streaming:
snake_case_ = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
snake_case_ = None
snake_case_ = None
snake_case_ = None
snake_case_ = None
self.builder.download_and_prepare(
download_config=lowercase_ , download_mode=lowercase_ , verification_mode=lowercase_ , base_path=lowercase_ , num_proc=self.num_proc , )
snake_case_ = self.builder.as_dataset(
split=self.split , verification_mode=lowercase_ , in_memory=self.keep_in_memory )
return dataset
class a :
def __init__( self : Dict , lowercase_ : Dataset , lowercase_ : Union[PathLike, BinaryIO] , lowercase_ : Optional[int] = None , lowercase_ : Optional[int] = None , **lowercase_ : List[str] , ):
if num_proc is not None and num_proc <= 0:
raise ValueError(F"num_proc {num_proc} must be an integer > 0." )
snake_case_ = dataset
snake_case_ = path_or_buf
snake_case_ = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
snake_case_ = num_proc
snake_case_ = '''utf-8'''
snake_case_ = to_json_kwargs
def A_ ( self : str ):
snake_case_ = self.to_json_kwargs.pop('''path_or_buf''' , lowercase_ )
snake_case_ = self.to_json_kwargs.pop('''orient''' , '''records''' )
snake_case_ = self.to_json_kwargs.pop('''lines''' , True if orient == '''records''' else False )
snake_case_ = self.to_json_kwargs.pop('''index''' , False if orient in ['''split''', '''table'''] else True )
snake_case_ = self.to_json_kwargs.pop('''compression''' , lowercase_ )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(F"`datasets` currently does not support {compression} compression" )
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf , '''wb''' , compression=lowercase_ ) as buffer:
snake_case_ = self._write(file_obj=lowercase_ , orient=lowercase_ , lines=lowercase_ , index=lowercase_ , **self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
F"The compression parameter is not supported when writing to a buffer, but compression={compression}"
''' was passed. Please provide a local path instead.''' )
snake_case_ = self._write(
file_obj=self.path_or_buf , orient=lowercase_ , lines=lowercase_ , index=lowercase_ , **self.to_json_kwargs )
return written
def A_ ( self : List[str] , lowercase_ : Dict ):
snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ = args
snake_case_ = query_table(
table=self.dataset.data , key=slice(lowercase_ , offset + self.batch_size ) , indices=self.dataset._indices , )
snake_case_ = batch.to_pandas().to_json(
path_or_buf=lowercase_ , orient=lowercase_ , lines=lowercase_ , index=lowercase_ , **lowercase_ )
if not json_str.endswith('''\n''' ):
json_str += "\n"
return json_str.encode(self.encoding )
def A_ ( self : Optional[int] , lowercase_ : BinaryIO , lowercase_ : List[Any] , lowercase_ : str , lowercase_ : int , **lowercase_ : Optional[int] , ):
snake_case_ = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ):
snake_case_ = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(lowercase_ )
else:
snake_case_ ,snake_case_ = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , lowercase_ , lowercase_ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ):
written += file_obj.write(lowercase_ )
return written
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
snake_case__ : Dict = logging.get_logger(__name__)
def get_config(model_name):
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    conv_layer = "std_conv" if "bit" in model_name else False
    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer,
        num_labels=1000,
        id2label=id2label,
        label2id=label2id,
    )
    return config
def rename_key(name):
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name
    return name
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_config(model_name)
    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val
    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)
    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms
    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )
    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits
    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")
if __name__ == "__main__":
snake_case__ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='resnetv2_50x1_bitm',
type=str,
help='Name of the BiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model to the hub.',
)
snake_case__ : Tuple = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
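# Example invocation (a sketch -- the script filename is an assumption, it is
# not given in the source):
#
#   python convert_bit_to_pytorch.py \
#       --model_name resnetv2_50x1_bitm \
#       --pytorch_dump_folder_path ./bit-50 \
#       --push_to_hub
#
# The script downloads the timm checkpoint, renames the state dict keys to the
# Hugging Face layout, checks that the logits agree within 1e-3, and saves the
# converted model and image processor.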
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
snake_case__ : Tuple = logging.get_logger(__name__)
snake_case__ : Optional[int] = {
'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class SCREAMING_SNAKE_CASE_ (a__ ):
'''simple docstring'''
_a = "detr"
_a = ["past_key_values"]
_a = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self : int , __a : Dict=True , __a : Union[str, Any]=None , __a : Union[str, Any]=3 , __a : Dict=100 , __a : str=6 , __a : List[str]=2_048 , __a : Any=8 , __a : List[str]=6 , __a : List[str]=2_048 , __a : str=8 , __a : Tuple=0.0 , __a : Dict=0.0 , __a : Optional[int]=True , __a : Union[str, Any]="relu" , __a : Optional[int]=256 , __a : Tuple=0.1 , __a : List[str]=0.0 , __a : Tuple=0.0 , __a : Tuple=0.02 , __a : Optional[Any]=1.0 , __a : List[str]=False , __a : Optional[int]="sine" , __a : Optional[Any]="resnet50" , __a : Optional[int]=True , __a : Dict=False , __a : Union[str, Any]=1 , __a : Optional[Any]=5 , __a : List[Any]=2 , __a : Any=1 , __a : int=1 , __a : List[str]=5 , __a : int=2 , __a : Any=0.1 , **__a : List[Any] , ) ->str:
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
lowerCamelCase_ : Optional[Any] = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(__a , __a ):
lowerCamelCase_ : List[Any] = backbone_config.get("""model_type""" )
lowerCamelCase_ : Optional[int] = CONFIG_MAPPING[backbone_model_type]
lowerCamelCase_ : List[str] = config_class.from_dict(__a )
# set timm attributes to None
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ : Any = None, None, None
lowerCamelCase_ : Dict = use_timm_backbone
lowerCamelCase_ : Optional[Any] = backbone_config
lowerCamelCase_ : List[Any] = num_channels
lowerCamelCase_ : int = num_queries
lowerCamelCase_ : int = d_model
lowerCamelCase_ : Union[str, Any] = encoder_ffn_dim
lowerCamelCase_ : Union[str, Any] = encoder_layers
lowerCamelCase_ : List[str] = encoder_attention_heads
lowerCamelCase_ : Any = decoder_ffn_dim
lowerCamelCase_ : Union[str, Any] = decoder_layers
lowerCamelCase_ : List[Any] = decoder_attention_heads
lowerCamelCase_ : Optional[Any] = dropout
lowerCamelCase_ : List[str] = attention_dropout
lowerCamelCase_ : List[Any] = activation_dropout
lowerCamelCase_ : Union[str, Any] = activation_function
lowerCamelCase_ : int = init_std
lowerCamelCase_ : Optional[Any] = init_xavier_std
lowerCamelCase_ : Any = encoder_layerdrop
lowerCamelCase_ : List[Any] = decoder_layerdrop
lowerCamelCase_ : Union[str, Any] = encoder_layers
lowerCamelCase_ : Any = auxiliary_loss
lowerCamelCase_ : Tuple = position_embedding_type
lowerCamelCase_ : Optional[int] = backbone
lowerCamelCase_ : Union[str, Any] = use_pretrained_backbone
lowerCamelCase_ : int = dilation
# Hungarian matcher
lowerCamelCase_ : str = class_cost
lowerCamelCase_ : Union[str, Any] = bbox_cost
lowerCamelCase_ : Tuple = giou_cost
# Loss coefficients
lowerCamelCase_ : Optional[int] = mask_loss_coefficient
lowerCamelCase_ : int = dice_loss_coefficient
lowerCamelCase_ : str = bbox_loss_coefficient
lowerCamelCase_ : List[str] = giou_loss_coefficient
lowerCamelCase_ : int = eos_coefficient
super().__init__(is_encoder_decoder=__a , **__a )
@property
def _lowerCAmelCase ( self : Union[str, Any] ) ->int:
return self.encoder_attention_heads
@property
def _lowerCAmelCase ( self : List[str] ) ->int:
return self.d_model
@classmethod
def _lowerCAmelCase ( cls : Tuple , __a : PretrainedConfig , **__a : Dict ) ->Optional[int]:
return cls(backbone_config=__a , **__a )
def _lowerCAmelCase ( self : List[Any] ) ->Dict[str, any]:
lowerCamelCase_ : Optional[Any] = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
lowerCamelCase_ : List[str] = self.backbone_config.to_dict()
lowerCamelCase_ : Union[str, Any] = self.__class__.model_type
return output
class SCREAMING_SNAKE_CASE_ (a__ ):
'''simple docstring'''
_a = version.parse("1.11" )
@property
def _lowerCAmelCase ( self : List[str] ) ->Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def _lowerCAmelCase ( self : Optional[Any] ) ->float:
return 1e-5
@property
def _lowerCAmelCase ( self : Union[str, Any] ) ->int:
return 12
"""simple docstring"""
def get_set_bits_count(number: int) -> int:
    """Count the set bits of a non-negative integer using Brian Kernighan's algorithm."""
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")
    count = 0
    while number:
        # This way we arrive at the next set bit (next 1) instead of looping
        # through each bit and checking for 1s; hence the loop won't run 32
        # times, it only runs once per set bit
        number &= number - 1
        count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
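# Worked example of Brian Kernighan's trick used above (illustrative, not part
# of the original module): for number = 0b1101 (13), successive
# `number &= number - 1` steps yield 0b1100 -> 0b1000 -> 0b0000, so the loop
# runs exactly three times -- once per set bit -- and get_set_bits_count(13)
# returns 3.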
"""simple docstring"""
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpt2 import GPT2Tokenizer


class TFGPT2Tokenizer(tf.keras.layers.Layer):
    def __init__(self, vocab, merges, max_length=None, pad_token_id=None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *init_inputs, **kwargs):
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length=None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)
        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length
            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )
        return {"attention_mask": attention_mask, "input_ids": input_ids}
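# Minimal usage sketch (an illustration that assumes the public "gpt2"
# checkpoint is reachable; it is not part of the original module). Because
# tokenization happens inside the TensorFlow graph, the layer can be saved and
# served together with a model:
#
#   tokenizer = TFGPT2Tokenizer.from_pretrained("gpt2")
#   batch = tokenizer(tf.constant(["hello world"]))
#   input_ids, attention_mask = batch["input_ids"], batch["attention_mask"]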
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
A =get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
A =25_00_04
A =25_00_20
@require_sentencepiece
@require_tokenizers
class _a ( __lowercase , unittest.TestCase ):
__a : int = MBartTokenizer
__a : Optional[int] = MBartTokenizerFast
__a : Optional[int] = True
__a : str = True
def A ( self : Tuple ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase = MBartTokenizer(__a , keep_accents=__a )
tokenizer.save_pretrained(self.tmpdirname )
def A ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase = MBartTokenizer(__a , keep_accents=__a )
UpperCAmelCase = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(__a , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__a ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
UpperCAmelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
UpperCAmelCase = tokenizer.convert_tokens_to_ids(__a )
self.assertListEqual(
__a , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
UpperCAmelCase = tokenizer.convert_ids_to_tokens(__a )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
def A ( self : Optional[Any] ):
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
UpperCAmelCase = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(__a , **__a )
UpperCAmelCase = self.tokenizer_class.from_pretrained(__a , **__a )
UpperCAmelCase = tempfile.mkdtemp()
UpperCAmelCase = tokenizer_r.save_pretrained(__a )
UpperCAmelCase = tokenizer_p.save_pretrained(__a )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
UpperCAmelCase = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(__a , __a )
# Checks everything loads correctly in the same way
UpperCAmelCase = tokenizer_r.from_pretrained(__a )
UpperCAmelCase = tokenizer_p.from_pretrained(__a )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__a , __a ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(__a )
# Save tokenizer rust, legacy_format=True
UpperCAmelCase = tempfile.mkdtemp()
UpperCAmelCase = tokenizer_r.save_pretrained(__a , legacy_format=__a )
UpperCAmelCase = tokenizer_p.save_pretrained(__a )
# Checks it save with the same files
self.assertSequenceEqual(__a , __a )
# Checks everything loads correctly in the same way
UpperCAmelCase = tokenizer_r.from_pretrained(__a )
UpperCAmelCase = tokenizer_p.from_pretrained(__a )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__a , __a ) )
shutil.rmtree(__a )
# Save tokenizer rust, legacy_format=False
UpperCAmelCase = tempfile.mkdtemp()
UpperCAmelCase = tokenizer_r.save_pretrained(__a , legacy_format=__a )
UpperCAmelCase = tokenizer_p.save_pretrained(__a )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
UpperCAmelCase = tokenizer_r.from_pretrained(__a )
UpperCAmelCase = tokenizer_p.from_pretrained(__a )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__a , __a ) )
shutil.rmtree(__a )
@require_torch
@require_sentencepiece
@require_tokenizers
class _a ( unittest.TestCase ):
__a : Dict = '''facebook/mbart-large-en-ro'''
__a : int = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
]
__a : Optional[int] = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
__a : Optional[int] = [8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2, EN_CODE]
@classmethod
def A ( cls : int ):
'''simple docstring'''
UpperCAmelCase = MBartTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' )
UpperCAmelCase = 1
return cls
def A ( self : Tuple ):
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 250_001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 250_004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 250_020 )
def A ( self : Dict ):
'''simple docstring'''
UpperCAmelCase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __a )
def A ( self : Optional[int] ):
'''simple docstring'''
self.assertIn(__a , self.tokenizer.all_special_ids )
UpperCAmelCase = [RO_CODE, 884, 9_019, 96, 9, 916, 86_792, 36, 18_743, 15_596, 5, 2]
UpperCAmelCase = self.tokenizer.decode(__a , skip_special_tokens=__a )
UpperCAmelCase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__a )
self.assertEqual(__a , __a )
self.assertNotIn(self.tokenizer.eos_token , __a )
def A ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase = ["""this is gunna be a long sentence """ * 20]
assert isinstance(src_text[0] , __a )
UpperCAmelCase = 10
UpperCAmelCase = self.tokenizer(__a , max_length=__a , truncation=__a ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , __a )
self.assertEqual(len(__a ) , __a )
def A ( self : str ):
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [250_026, 250_001] )
def A ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase = tempfile.mkdtemp()
UpperCAmelCase = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__a )
UpperCAmelCase = MBartTokenizer.from_pretrained(__a )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __a )
@require_torch
def A ( self : Dict ):
'''simple docstring'''
UpperCAmelCase = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__a , return_tensors='''pt''' )
UpperCAmelCase = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def A ( self : Dict ):
'''simple docstring'''
UpperCAmelCase = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__a , truncation=__a , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
UpperCAmelCase = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
self.assertIsInstance(__a , __a )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
UpperCAmelCase = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __a )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def A ( self : str ):
'''simple docstring'''
UpperCAmelCase = self.tokenizer(self.src_text , padding=__a , truncation=__a , max_length=3 , return_tensors='''pt''' )
UpperCAmelCase = self.tokenizer(
text_target=self.tgt_text , padding=__a , truncation=__a , max_length=10 , return_tensors='''pt''' )
UpperCAmelCase = targets["""input_ids"""]
UpperCAmelCase = shift_tokens_right(__a , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def A ( self : Any ):
'''simple docstring'''
UpperCAmelCase = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' )
self.assertEqual(
nested_simplify(__a ) , {
# A, test, EOS, en_XX
'''input_ids''': [[62, 3_034, 2, 250_004]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 250_001,
} , )
'''simple docstring'''
from __future__ import annotations
import math
class SegmentTree:
    def __init__(self, size: int):
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
            )

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        """Assign `val` to every position in [a, b], pushing pending lazy updates down."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
        )
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        """Return the maximum over positions [a, b], pushing pending lazy updates down."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self):
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])


if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt)
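# Hand-checked trace of the demo above (illustrative): the three queries print
# 7 (maximum of positions 4-6), 14 (positions 7-11) and 15 (positions 7-12).
# After update(1, 3, 111) the range maximum over 1-15 is 111, and the final
# print shows the lazily propagated array with positions 1-3 set to 111 and
# positions 7-8 set to 235.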
'''simple docstring'''
def twos_complement(number: int) -> str:
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
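# Worked example (illustrative, not part of the original module): for
# twos_complement(-5), bin(-5)[3:] is "101" (length 3),
# abs(-5) - (1 << 3) = -3, and bin(-3)[3:] = "11"; padding to length 3 and
# prepending the sign bit gives "0b1011", which is -5 in 4-bit two's
# complement.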
'''simple docstring'''
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job):
    """Extract time information from a single job in a GitHub Actions workflow run."""
    job_info = {}
    start = job["started_at"]
    end = job["completed_at"]
    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)
    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)
    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min
    return job_info
def get_job_time(workflow_run_id, token=None):
    """Extract time information for all jobs in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}
    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")
    return {}
if __name__ == "__main__":
lowerCAmelCase : Union[str, Any] =argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
lowerCAmelCase : Tuple =parser.parse_args()
lowerCAmelCase : int =get_job_time(args.workflow_run_id)
lowerCAmelCase : Optional[int] =dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(F'''{k}: {v['duration']}''')
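# Example invocation (a sketch -- the script filename is an assumption):
#
#   python get_github_job_time.py --workflow_run_id 123456789
#
# This prints every job of the given workflow run together with its duration
# in minutes, sorted longest first.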
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
lowercase = logging.get_logger("""transformers.models.encodec""")
lowercase = {
"""quantizer.vq.layers.*._codebook.inited""": """quantizer.layers.*.codebook.inited""",
"""quantizer.vq.layers.*._codebook.cluster_size""": """quantizer.layers.*.codebook.cluster_size""",
"""quantizer.vq.layers.*._codebook.embed""": """quantizer.layers.*.codebook.embed""",
"""quantizer.vq.layers.*._codebook.embed_avg""": """quantizer.layers.*.codebook.embed_avg""",
}
lowercase = {
"""encoder.model.0.conv.conv""": """encoder.layers.0.conv""",
"""encoder.model.1.block.1.conv.conv""": """encoder.layers.1.block.1.conv""",
"""encoder.model.1.block.3.conv.conv""": """encoder.layers.1.block.3.conv""",
"""encoder.model.1.shortcut.conv.conv""": """encoder.layers.1.shortcut.conv""",
"""encoder.model.3.conv.conv""": """encoder.layers.3.conv""",
"""encoder.model.4.block.1.conv.conv""": """encoder.layers.4.block.1.conv""",
"""encoder.model.4.block.3.conv.conv""": """encoder.layers.4.block.3.conv""",
"""encoder.model.4.shortcut.conv.conv""": """encoder.layers.4.shortcut.conv""",
"""encoder.model.6.conv.conv""": """encoder.layers.6.conv""",
"""encoder.model.7.block.1.conv.conv""": """encoder.layers.7.block.1.conv""",
"""encoder.model.7.block.3.conv.conv""": """encoder.layers.7.block.3.conv""",
"""encoder.model.7.shortcut.conv.conv""": """encoder.layers.7.shortcut.conv""",
"""encoder.model.9.conv.conv""": """encoder.layers.9.conv""",
"""encoder.model.10.block.1.conv.conv""": """encoder.layers.10.block.1.conv""",
"""encoder.model.10.block.3.conv.conv""": """encoder.layers.10.block.3.conv""",
"""encoder.model.10.shortcut.conv.conv""": """encoder.layers.10.shortcut.conv""",
"""encoder.model.12.conv.conv""": """encoder.layers.12.conv""",
"""encoder.model.13.lstm""": """encoder.layers.13.lstm""",
"""encoder.model.15.conv.conv""": """encoder.layers.15.conv""",
}
MAPPING_ENCODER_48K = {
"""encoder.model.0.conv.norm""": """encoder.layers.0.norm""",
"""encoder.model.1.block.1.conv.norm""": """encoder.layers.1.block.1.norm""",
"""encoder.model.1.block.3.conv.norm""": """encoder.layers.1.block.3.norm""",
"""encoder.model.1.shortcut.conv.norm""": """encoder.layers.1.shortcut.norm""",
"""encoder.model.3.conv.norm""": """encoder.layers.3.norm""",
"""encoder.model.4.block.1.conv.norm""": """encoder.layers.4.block.1.norm""",
"""encoder.model.4.block.3.conv.norm""": """encoder.layers.4.block.3.norm""",
"""encoder.model.4.shortcut.conv.norm""": """encoder.layers.4.shortcut.norm""",
"""encoder.model.6.conv.norm""": """encoder.layers.6.norm""",
"""encoder.model.7.block.1.conv.norm""": """encoder.layers.7.block.1.norm""",
"""encoder.model.7.block.3.conv.norm""": """encoder.layers.7.block.3.norm""",
"""encoder.model.7.shortcut.conv.norm""": """encoder.layers.7.shortcut.norm""",
"""encoder.model.9.conv.norm""": """encoder.layers.9.norm""",
"""encoder.model.10.block.1.conv.norm""": """encoder.layers.10.block.1.norm""",
"""encoder.model.10.block.3.conv.norm""": """encoder.layers.10.block.3.norm""",
"""encoder.model.10.shortcut.conv.norm""": """encoder.layers.10.shortcut.norm""",
"""encoder.model.12.conv.norm""": """encoder.layers.12.norm""",
"""encoder.model.15.conv.norm""": """encoder.layers.15.norm""",
}
MAPPING_DECODER = {
"""decoder.model.0.conv.conv""": """decoder.layers.0.conv""",
"""decoder.model.1.lstm""": """decoder.layers.1.lstm""",
"""decoder.model.3.convtr.convtr""": """decoder.layers.3.conv""",
"""decoder.model.4.block.1.conv.conv""": """decoder.layers.4.block.1.conv""",
"""decoder.model.4.block.3.conv.conv""": """decoder.layers.4.block.3.conv""",
"""decoder.model.4.shortcut.conv.conv""": """decoder.layers.4.shortcut.conv""",
"""decoder.model.6.convtr.convtr""": """decoder.layers.6.conv""",
"""decoder.model.7.block.1.conv.conv""": """decoder.layers.7.block.1.conv""",
"""decoder.model.7.block.3.conv.conv""": """decoder.layers.7.block.3.conv""",
"""decoder.model.7.shortcut.conv.conv""": """decoder.layers.7.shortcut.conv""",
"""decoder.model.9.convtr.convtr""": """decoder.layers.9.conv""",
"""decoder.model.10.block.1.conv.conv""": """decoder.layers.10.block.1.conv""",
"""decoder.model.10.block.3.conv.conv""": """decoder.layers.10.block.3.conv""",
"""decoder.model.10.shortcut.conv.conv""": """decoder.layers.10.shortcut.conv""",
"""decoder.model.12.convtr.convtr""": """decoder.layers.12.conv""",
"""decoder.model.13.block.1.conv.conv""": """decoder.layers.13.block.1.conv""",
"""decoder.model.13.block.3.conv.conv""": """decoder.layers.13.block.3.conv""",
"""decoder.model.13.shortcut.conv.conv""": """decoder.layers.13.shortcut.conv""",
"""decoder.model.15.conv.conv""": """decoder.layers.15.conv""",
}
MAPPING_DECODER_48K = {
"""decoder.model.0.conv.norm""": """decoder.layers.0.norm""",
"""decoder.model.3.convtr.norm""": """decoder.layers.3.norm""",
"""decoder.model.4.block.1.conv.norm""": """decoder.layers.4.block.1.norm""",
"""decoder.model.4.block.3.conv.norm""": """decoder.layers.4.block.3.norm""",
"""decoder.model.4.shortcut.conv.norm""": """decoder.layers.4.shortcut.norm""",
"""decoder.model.6.convtr.norm""": """decoder.layers.6.norm""",
"""decoder.model.7.block.1.conv.norm""": """decoder.layers.7.block.1.norm""",
"""decoder.model.7.block.3.conv.norm""": """decoder.layers.7.block.3.norm""",
"""decoder.model.7.shortcut.conv.norm""": """decoder.layers.7.shortcut.norm""",
"""decoder.model.9.convtr.norm""": """decoder.layers.9.norm""",
"""decoder.model.10.block.1.conv.norm""": """decoder.layers.10.block.1.norm""",
"""decoder.model.10.block.3.conv.norm""": """decoder.layers.10.block.3.norm""",
"""decoder.model.10.shortcut.conv.norm""": """decoder.layers.10.shortcut.norm""",
"""decoder.model.12.convtr.norm""": """decoder.layers.12.norm""",
"""decoder.model.13.block.1.conv.norm""": """decoder.layers.13.block.1.norm""",
"""decoder.model.13.block.3.conv.norm""": """decoder.layers.13.block.3.norm""",
"""decoder.model.13.shortcut.conv.norm""": """decoder.layers.13.shortcut.norm""",
"""decoder.model.15.conv.norm""": """decoder.layers.15.norm""",
}
MAPPING_24K = {
    **MAPPING_QUANTIZER,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
}
MAPPING_48K = {
    **MAPPING_QUANTIZER,
    **MAPPING_ENCODER,
    **MAPPING_ENCODER_48K,
    **MAPPING_DECODER,
    **MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
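# Illustration (not used by the script itself): a pattern like "decoder.*.lstm" splits into
# prefix "decoder" and suffix "lstm", so should_ignore("decoder.model.1.lstm.bias_ih_l0",
# ["decoder.*.lstm"]) returns True, while a trailing-wildcard pattern like "quantizer.*"
# matches any name that starts with "quantizer.".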
def recursively_load_weights(orig_dict, hf_model, model_name):
    unused_weights = []

    if model_name in ["encodec_24khz", "encodec_32khz"]:
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f"Unsupported model: {model_name}")

    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    key = suffix

            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed") and name.endswith("embed_avg"):
                    continue

                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)
                if "weight_g" in name:
                    weight_type = "weight_g"
                elif "weight_v" in name:
                    weight_type = "weight_v"
                elif "weight_ih_l0" in name:
                    weight_type = "weight_ih_l0"
                elif "weight_hh_l0" in name:
                    weight_type = "weight_hh_l0"
                elif "bias_ih_l0" in name:
                    weight_type = "bias_ih_l0"
                elif "bias_hh_l0" in name:
                    weight_type = "bias_hh_l0"
                elif "weight_ih_l1" in name:
                    weight_type = "weight_ih_l1"
                elif "weight_hh_l1" in name:
                    weight_type = "weight_hh_l1"
                elif "bias_ih_l1" in name:
                    weight_type = "bias_ih_l1"
                elif "bias_hh_l1" in name:
                    weight_type = "bias_hh_l1"
                elif "bias" in name:
                    weight_type = "bias"
                elif "weight" in name:
                    weight_type = "weight"
                elif "running_mean" in name:
                    weight_type = "running_mean"
                elif "running_var" in name:
                    weight_type = "running_var"
                elif "num_batches_tracked" in name:
                    weight_type = "num_batches_tracked"
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
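# Worked example (illustrative): the checkpoint key "encoder.model.1.block.1.conv.conv.weight_v"
# matches the MAPPING entry "encoder.model.1.block.1.conv.conv" -> "encoder.layers.1.block.1.conv",
# weight_type is detected as "weight_v", and set_recursively walks that dotted path on the
# EncodecModel instance before copying the tensor into the matching parameter.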
@torch.no_grad()
def convert_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()

    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f"Unknown model name: {model_name}")

    model = EncodecModel(config)

    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels,
        sampling_rate=config.sampling_rate,
        chunk_length_s=config.chunk_length_s,
        overlap=config.overlap,
    )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)

    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model",
        default="encodec_24khz",
        type=str,
        help="The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.",
    )
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )

    args = parser.parse_args()
    convert_checkpoint(
        args.model,
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
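# Example invocation (paths are illustrative; first download a checkpoint from one of the
# URLs listed at the top of this script):
#   python convert_encodec_checkpoint_to_pytorch.py \
#       --model encodec_24khz \
#       --checkpoint_path ./encodec_24khz-d7cc33bc.th \
#       --pytorch_dump_folder_path ./encodec_24khz_converted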
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()

DEVICE_MAPPING = None


class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs

    @staticmethod
    def _map_devices_to_str():
        import jax

        return {str(device): device for device in jax.devices()}

    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return jnp.stack(column, axis=0)
        return column

    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})

    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
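# Usage sketch (illustrative; this module relies on the surrounding `datasets` package,
# so the example below assumes the installed library rather than this file in isolation):
#   import datasets
#   ds = datasets.Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]]})
#   ds = ds.with_format("jax")
#   ds[0]["x"]  # returns a jax.Array produced by JaxFormatter._tensorize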
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class FlaxBeitModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
    ):
        self.parent = parent
        self.vocab_size = vocab_size
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = BeitConfig(
            vocab_size=self.vocab_size,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, pixel_values, labels

    def create_and_check_model(self, config, pixel_values, labels):
        model = FlaxBeitModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels):
        model = FlaxBeitForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FlaxBeitForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxBeitForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxBeitModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
    )

    def setUp(self) -> None:
        self.model_tester = FlaxBeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    # We need to override this test because BeiT's forward signature differs from text models.
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    # We need to override this test because BeiT expects pixel_values instead of input_ids.
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("microsoft/beit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@require_flax
class FlaxBeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_masked_image_modeling_head(self):
        model = FlaxBeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k")

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="np").pixel_values

        # prepare bool_masked_pos
        bool_masked_pos = np.ones((1, 196), dtype=bool)

        # forward pass
        outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 196, 8192)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        )
        self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))

    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array([-1.2385, -1.0987, -1.0108])
        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 21841)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array([1.6881, -0.2787, 0.5901])
        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


# This is the linear search that will occur after the search space has become smaller.
def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1
def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative method of the ternary search algorithm."""
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1
def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive method of the ternary search algorithm."""
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
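# Quick sanity check (illustrative, not part of the original interactive harness):
# both variants should agree on a sorted ascending input. With precision = 10, small
# inputs like these immediately fall back to lin_search.
#   ite_ternary_search([1, 3, 5, 7, 9], 7)        # -> 3
#   rec_ternary_search(0, 4, [1, 3, 5, 7, 9], 2)  # -> -1 (not present)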
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result2 != -1:
        print(f"Iterative search: {target} found at positions: {result1}")
        print(f"Recursive search: {target} found at positions: {result2}")
    else:
        print("Not found")
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=40,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFPegasusModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
{
"""conversational""": TFPegasusForConditionalGeneration,
"""feature-extraction""": TFPegasusModel,
"""summarization""": TFPegasusForConditionalGeneration,
"""text2text-generation""": TFPegasusForConditionalGeneration,
"""translation""": TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests(unittest.TestCase):
    src_text = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
    expected_text = [
"""California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to"""
""" reduce the risk of wildfires.""",
"""N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.""",
] # differs slightly from pytorch, likely due to numerical differences in linear layers
    model_name = "google/pegasus-xsum"
    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        assert self.expected_text == generated_words

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, padding=True, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
            attention_mask=model_inputs.attention_mask,
            num_beams=2,
            use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation(self):
        self._assert_generated_batch_equal_expected()
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
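# Example session (illustrative): running `diffusers-cli env` parses the `env` subcommand,
# dispatches to EnvironmentCommand, and prints platform and library version information.
# The exact console-script name depends on how the package's entry points are configured.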
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
MODEL_ID = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
REVISION_ID_DEFAULT = "main"
# Default branch name
REVISION_ID_ONE_SPECIFIC_COMMIT = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"
# One particular commit (not the top of `main`)
REVISION_ID_INVALID = "aaaaaaa"
# This commit does not exist, so we should 404.
PINNED_SHA1 = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"
# Sha-1 of config.json on the top of `main`, for checking purposes
PINNED_SHA256 = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"
@contextlib.contextmanager
def context_en():
    print("Welcome!")
    yield
    print("Bye!")


@contextlib.contextmanager
def context_fr():
    print("Bonjour!")
    yield
    print("Au revoir!")


class TestImportMechanisms(unittest.TestCase):
    def test_module_spec_available(self):
        assert transformers.__spec__ is not None
        assert importlib.util.find_spec("transformers") is not None


class GenericUtilTests(unittest.TestCase):
    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_no_context(self, mock_stdout):
"""simple docstring"""
with ContextManagers([] ):
print('''Transformers are awesome!''' )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() , '''Transformers are awesome!\n''' )
@unittest.mock.patch('''sys.stdout''' , new_callable=io.StringIO )
    def test_context_managers_one_context(self, mock_stdout):
"""simple docstring"""
with ContextManagers([context_en()] ):
print('''Transformers are awesome!''' )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , '''Welcome!\nTransformers are awesome!\nBye!\n''' )
@unittest.mock.patch('''sys.stdout''' , new_callable=io.StringIO )
    def test_context_managers_two_context(self, mock_stdout):
"""simple docstring"""
with ContextManagers([context_fr(), context_en()] ):
print('''Transformers are awesome!''' )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , '''Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n''' )
    @require_torch
    def test_find_labels_pt(self):
        self.assertEqual(find_labels(BertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(BertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(BertForQuestionAnswering), ["start_positions", "end_positions"])

        # find_labels works regardless of the class name, as it inspects the forward signature
        class DummyModel(BertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_tf
    def test_find_labels_tf(self):
        self.assertEqual(find_labels(TFBertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(TFBertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(TFBertForQuestionAnswering), ["start_positions", "end_positions"])

        class DummyModel(TFBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_flax
    def test_find_labels_flax(self):
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification), [])
        self.assertEqual(find_labels(FlaxBertForPreTraining), [])
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering), [])

        class DummyModel(FlaxBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), [])
import inspect
import unittest
from transformers import MobileNetV2Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import MobileNetV2ForImageClassification, MobileNetV2ForSemanticSegmentation, MobileNetV2Model
    from transformers.models.mobilenet_v2.modeling_mobilenet_v2 import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
    from transformers import MobileNetV2ImageProcessor
class MobileNetV2ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))


class MobileNetV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        tf_padding=True,
        hidden_act="relu6",
        last_hidden_size=1280,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier)
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetV2Config(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            depth_divisible_by=self.depth_divisible_by,
            min_depth=self.min_depth,
            expand_ratio=self.expand_ratio,
            output_stride=self.output_stride,
            first_layer_is_expansion=self.first_layer_is_expansion,
            finegrained_output=self.finegrained_output,
            hidden_act=self.hidden_act,
            tf_padding=self.tf_padding,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.last_hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV2ForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileNetV2Model, MobileNetV2ForImageClassification, MobileNetV2ForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileNetV2Model,
            "image-classification": MobileNetV2ForImageClassification,
            "image-segmentation": MobileNetV2ForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False
    test_attention_outputs = False

    def setUp(self):
        self.model_tester = MobileNetV2ModelTester(self)
        self.config_tester = MobileNetV2ConfigTester(self, config_class=MobileNetV2Config, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 16
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileNetV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetV2ImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224") if is_vision_available() else None
        )
    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetV2ForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.2445, -1.1993, 0.1905]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileNetV2ForSemanticSegmentation.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")
        model = model.to(torch_device)

        image_processor = MobileNetV2ImageProcessor.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 65, 65))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
                [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
                [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[int],
    iterations: int,
) -> list[float]:
    """Jacobi Iteration Method: an iterative algorithm to determine the solutions of
    a strictly diagonally dominant system of linear equations."""
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate((coefficient_matrix, constant_matrix), axis=1)

    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    """Checks if the given matrix is strictly diagonally dominant; raises otherwise."""
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(rows):
        total = 0
        for j in range(cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
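    # Illustrative run (values chosen here, not from the original file): a strictly
    # diagonally dominant 2x2 system; the exact solution is x = 1/11, y = 7/11.
    coefficient = np.array([[4.0, 1.0], [1.0, 3.0]])
    constant = np.array([[1.0], [2.0]])
    print(jacobi_iteration_method(coefficient, constant, [1, 1], iterations=25))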
| 451
|
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    """simple docstring"""

    def __init__(self, unet, scheduler) -> None:
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        """simple docstring"""
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
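# Illustrative usage sketch (added for clarity; the checkpoint id below is an
# assumption, not from this file — any DDPM/DDIM-compatible UNet checkpoint works):
#
#   pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#   image = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images[0]
#
# eta=0.0 gives the deterministic DDIM sampler; eta=1.0 recovers DDPM-like variance.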
| 451
| 1
|
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
lowercase_ = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class __UpperCamelCase ( lowerCAmelCase__ ):
"""simple docstring"""
    def __init__(self, *args, eval_examples=None, post_process_function=None, quant_trainer_args=None, **kwargs):
        """simple docstring"""
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 128  # default number of calibration samples

    def get_calib_dataloader(self, calib_dataset=None):
        """simple docstring"""
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError("Trainer: calibration requires a calib_dataset.")
        calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset

        calib_dataset = self._remove_unused_columns(calib_dataset, description="Calibration")
        return DataLoader(
            calib_dataset,
            batch_size=self.args.eval_batch_size,
            collate_fn=self.data_collator,
            drop_last=self.args.dataloader_drop_last,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
            shuffle=True,
        )

    def calibrate(self, calib_dataset=None):
        """simple docstring"""
        calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset
        calib_dataloader = self.get_calib_dataloader(calib_dataset)

        model = self.model
        quant_trainer.configure_model(model, self.quant_trainer_args, calib=True)
        model.eval()
        quant_trainer.enable_calibration(model)

        logger.info("***** Running calibration *****")
        logger.info(f"  Num examples = {self.calib_num}")
        logger.info(f"  Batch size = {calib_dataloader.batch_size}")

        for step, inputs in enumerate(calib_dataloader):
            # Prediction step
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only=True)
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break

        quant_trainer.finish_calibration(model, self.quant_trainer_args)
        self.model = model
def UpperCAmelCase__ ( self : List[str] , _A : int=None , _A : Optional[Any]=None , _A : List[Any]=None , _A : str = "eval" ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = self.eval_dataset if eval_dataset is None else eval_dataset
__SCREAMING_SNAKE_CASE : str = self.get_eval_dataloader(_A )
__SCREAMING_SNAKE_CASE : List[Any] = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
__SCREAMING_SNAKE_CASE : List[str] = self.compute_metrics
__SCREAMING_SNAKE_CASE : Union[str, Any] = None
__SCREAMING_SNAKE_CASE : Optional[Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__SCREAMING_SNAKE_CASE : Optional[int] = eval_loop(
_A , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_A , )
finally:
__SCREAMING_SNAKE_CASE : List[str] = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
__SCREAMING_SNAKE_CASE : Dict = self.post_process_function(_A , _A , output.predictions )
__SCREAMING_SNAKE_CASE : str = self.compute_metrics(_A )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
self.log(_A )
else:
__SCREAMING_SNAKE_CASE : str = {}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
return metrics
def UpperCAmelCase__ ( self : Tuple , _A : int , _A : Optional[int] , _A : str=None , _A : str = "test" ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = self.get_test_dataloader(_A )
# Temporarily disable metric computation, we will do it in the loop here.
__SCREAMING_SNAKE_CASE : str = self.compute_metrics
__SCREAMING_SNAKE_CASE : Dict = None
__SCREAMING_SNAKE_CASE : Tuple = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__SCREAMING_SNAKE_CASE : Union[str, Any] = eval_loop(
_A , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_A , )
finally:
__SCREAMING_SNAKE_CASE : Tuple = compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
__SCREAMING_SNAKE_CASE : Dict = self.post_process_function(_A , _A , output.predictions , '''predict''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.compute_metrics(_A )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=_A )
def UpperCAmelCase__ ( self : Dict , _A : List[Any]="./" ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = self.eval_dataset
__SCREAMING_SNAKE_CASE : int = self.get_eval_dataloader(_A )
__SCREAMING_SNAKE_CASE : str = next(iter(_A ) )
# saving device - to make it consistent
__SCREAMING_SNAKE_CASE : str = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
# convert to tuple
__SCREAMING_SNAKE_CASE : List[Any] = tuple(v.to(_A ) for k, v in batch.items() )
logger.info('''Converting model to be onnx compatible''' )
from pytorch_quantization.nn import TensorQuantizer
__SCREAMING_SNAKE_CASE : Dict = True
__SCREAMING_SNAKE_CASE : Dict = self.model.to(_A )
model.eval()
model.float()
__SCREAMING_SNAKE_CASE : str = model.module if hasattr(_A , '''module''' ) else model
quant_trainer.configure_model(_A , self.quant_trainer_args )
__SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(_A , '''model.onnx''' )
logger.info(F'''exporting model to {output_model_file}''' )
axes = {0: "batch_size", 1: "seq_len"}
torch.onnx.export(
_A , _A , _A , export_params=_A , opset_version=13 , do_constant_folding=_A , input_names=['''input_ids''', '''attention_mask''', '''token_type_ids'''] , output_names=['''output_start_logits''', '''output_end_logits'''] , dynamic_axes={
'''input_ids''': axes,
'''attention_mask''': axes,
'''token_type_ids''': axes,
'''output_start_logits''': axes,
'''output_end_logits''': axes,
} , verbose=_A , )
logger.info('''onnx export finished''' )
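        # Note: the same {0: "batch_size", 1: "seq_len"} axes dict is applied to every
        # input and output above, so the exported graph accepts arbitrary batch sizes
        # and sequence lengths at inference time.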
| 74
|
def bubble_sort(list_data: list, length: int = 0) -> list:
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data, length - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
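    # Illustrative run (input chosen here, not from the original file): each recursive
    # call bubbles the largest remaining element into place and shrinks `length` by one.
    print(bubble_sort([5, 1, 4, 2, 8]))  # -> [1, 2, 4, 5, 8]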
| 352
| 0
|
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset():
    """simple docstring"""
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
| 525
|
from ... import PretrainedConfig
NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}


class NezhaConfig(PretrainedConfig):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(
        self,
        vocab_size=21128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
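# Illustrative instantiation (values shown are the defaults above):
#
#   config = NezhaConfig(hidden_size=768, num_hidden_layers=12)
#
# max_relative_position bounds the window used by NEZHA's functional relative
# positional encoding, which is the main difference from a vanilla BERT config.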
| 525
| 1
|
import json
import os
import torch
from diffusers import UNetaDModel
os.makedirs('''hub/hopper-medium-v2/unet/hor32''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/unet/hor128''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/value_function''', exist_ok=True)
def unet(hor):
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")

    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")

    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 65536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNetaDModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")

    # the two state dicts have identical key ordering, so a positional zip maps old names to new
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)

    hf_value_function.load_state_dict(state_dict)
    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)
def value_function():
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 65536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }

    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    state_dict = model  # the value-function checkpoint is saved as a plain state dict
    hf_value_function = UNetaDModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")

    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)

    hf_value_function.load_state_dict(state_dict)
    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
| 304
|
from math import pow
def backtrack(
    needed_sum: int,
    power: int,
    current_number: int,
    current_sum: int,
    solutions_count: int,
):
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
    return current_sum, solutions_count


def solve(needed_sum: int, power: int) -> int:
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10."
        )

    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count
if __name__ == "__main__":
import doctest
doctest.testmod()
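    # Illustrative check (values chosen here): 13 = 2**2 + 3**2 is the only way to
    # write 13 as a sum of unique squares, so exactly one solution is counted.
    print(solve(13, 2))  # -> 1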
| 304
| 1
|
'''simple docstring'''
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
KEYS_TO_MODIFY_MAPPING = {
'iou_prediction_head.layers.0': 'iou_prediction_head.proj_in',
'iou_prediction_head.layers.1': 'iou_prediction_head.layers.0',
'iou_prediction_head.layers.2': 'iou_prediction_head.proj_out',
'mask_decoder.output_upscaling.0': 'mask_decoder.upscale_conv1',
'mask_decoder.output_upscaling.1': 'mask_decoder.upscale_layer_norm',
'mask_decoder.output_upscaling.3': 'mask_decoder.upscale_conv2',
'mask_downscaling.0': 'mask_embed.conv1',
'mask_downscaling.1': 'mask_embed.layer_norm1',
'mask_downscaling.3': 'mask_embed.conv2',
'mask_downscaling.4': 'mask_embed.layer_norm2',
'mask_downscaling.6': 'mask_embed.conv3',
'point_embeddings': 'point_embed',
'pe_layer.positional_encoding_gaussian_matrix': 'shared_embedding.positional_embedding',
'image_encoder': 'vision_encoder',
'neck.0': 'neck.conv1',
'neck.1': 'neck.layer_norm1',
'neck.2': 'neck.conv2',
'neck.3': 'neck.layer_norm2',
'patch_embed.proj': 'patch_embed.projection',
'.norm': '.layer_norm',
'blocks': 'layers',
}
def replace_keys(state_dict):
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)

    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"

    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")

        model_state_dict[key] = value

    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]

    return model_state_dict
def convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    checkpoint_path = hf_hub_download(model_hub_id, f"checkpoints/{model_name}.pth")

    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1024,
            num_hidden_layers=24,
            num_attention_heads=16,
            global_attn_indexes=[5, 11, 17, 23],
        )
        config = SamConfig(vision_config=vision_config)
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1280,
            num_hidden_layers=32,
            num_attention_heads=16,
            global_attn_indexes=[7, 15, 23, 31],
        )
        config = SamConfig(vision_config=vision_config)

    state_dict = torch.load(checkpoint_path, map_location="cpu")
    state_dict = replace_keys(state_dict)

    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)
    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to("cuda")

    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
    input_points = [[[400, 650]]]
    input_labels = [[1]]

    inputs = processor(images=np.array(raw_image), return_tensors="pt").to("cuda")
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579890251159668

    inputs = processor(
        images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
    ).to("cuda")
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()
    assert scores[-1].item() == 0.9712603092193604

    input_boxes = ((75, 275, 1725, 850),)
    inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to("cuda")
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()
    assert scores[-1].item() == 0.8686015605926514

    # Test with 2 points and 1 image.
    input_points = [[[400, 650], [800, 650]]]
    input_labels = [[1, 1]]
    inputs = processor(
        images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
    ).to("cuda")
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()
    assert scores[-1].item() == 0.9936047792434692
if __name__ == "__main__":
parser = argparse.ArgumentParser()
choices = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
parser.add_argument(
"""--model_name""",
default="""sam_vit_h_4b8939""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
parser.add_argument(
"""--model_hub_id""",
default="""ybelkada/segment-anything""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
args = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 710
|
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        '''simple docstring'''
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        '''simple docstring'''
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 88
| 0
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
__A = logging.get_logger(__name__)
__A = {
"Intel/dpt-large": "https://huggingface.co/Intel/dpt-large/resolve/main/config.json",
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class DPTConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "dpt"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=384,
        patch_size=16,
        num_channels=3,
        is_hybrid=False,
        qkv_bias=True,
        backbone_out_indices=[2, 5, 8, 11],
        readout_type="project",
        reassemble_factors=[4, 2, 1, 0.5],
        neck_hidden_sizes=[96, 192, 384, 768],
        fusion_hidden_size=256,
        head_in_index=-1,
        use_batch_norm_in_fusion_residual=False,
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        semantic_loss_ignore_index=255,
        semantic_classifier_dropout=0.1,
        backbone_featmap_shape=[1, 1024, 24, 24],
        neck_ignore_stages=[0, 1],
        backbone_config=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid

        if self.is_hybrid:
            if backbone_config is None:
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = {
                    "global_padding": "same",
                    "layer_type": "bottleneck",
                    "depths": [3, 4, 9],
                    "out_features": ["stage1", "stage2", "stage3"],
                    "embedding_dynamic_padding": True,
                }
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, dict):
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, PretrainedConfig):
                backbone_config = backbone_config
            else:
                raise ValueError(
                    f"backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}."
                )
            self.backbone_config = backbone_config
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages

            if readout_type != "project":
                raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode.")
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []

        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']")
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 68
|
def xnor_gate(input_1: int, input_2: int) -> int:
    """simple docstring"""
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    """simple docstring"""
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
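    # XNOR is the equality predicate on bits: it outputs 1 exactly when both inputs
    # match, as the four prints above show.
    test_xnor_gate()  # runs the assertion-based self-check defined above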
| 68
| 1
|
'''simple docstring'''
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    '''simple docstring'''
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)

    return primes
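# Quick sanity check of the sieve (illustrative input, chosen here):
# prime_sieve(10) == [2, 3, 5, 7].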
def solution(ceiling: int = 1_000_000) -> int:
    '''simple docstring'''
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break

            if sol in primes:
                length = j - i
                largest = sol

    return largest
if __name__ == "__main__":
print(f'''{solution() = }''')
| 701
|
'''simple docstring'''
import os
import time
import numpy as np
import onnxruntime as ort
SCREAMING_SNAKE_CASE__ = "1"
SCREAMING_SNAKE_CASE__ = "0"
SCREAMING_SNAKE_CASE__ = "1"
sess_opt = ort.SessionOptions()
sess_opt.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print("Create inference session...")
execution_provider = ["TensorrtExecutionProvider", "CUDAExecutionProvider"]
sess = ort.InferenceSession("model.onnx", sess_options=sess_opt, providers=execution_provider)
run_opt = ort.RunOptions()

sequence = 128
batch = 1
input_ids = np.ones((batch, sequence), dtype=np.intaa)
attention_mask = np.ones((batch, sequence), dtype=np.intaa)
token_type_ids = np.ones((batch, sequence), dtype=np.intaa)

print("Warm up phase...")
sess.run(
    None,
    {
        sess.get_inputs()[0].name: input_ids,
        sess.get_inputs()[1].name: attention_mask,
        sess.get_inputs()[2].name: token_type_ids,
    },
    run_options=run_opt,
)

print("Start inference...")
start_time = time.time()
max_iters = 2000
predict = {}
for iter in range(max_iters):
    predict = sess.run(
        None,
        {
            sess.get_inputs()[0].name: input_ids,
            sess.get_inputs()[1].name: attention_mask,
            sess.get_inputs()[2].name: token_type_ids,
        },
        run_options=run_opt,
    )
print("Average Inference Time = {:.3f} ms".format((time.time() - start_time) * 1000 / max_iters))
| 539
| 0
|
from scipy.stats import spearmanr
import datasets
__lowerCamelCase : Tuple = "\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n"
__lowerCamelCase : Any = "\nArgs:\n predictions (`List[float]`): Predicted labels, as returned by a model.\n references (`List[float]`): Ground truth labels.\n return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n only the spearmanr score. Defaults to `False`.\nReturns:\n spearmanr (`float`): Spearman correlation coefficient.\n p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n Example 1:\n >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n >>> print(results)\n {'spearmanr': -0.7}\n\n Example 2:\n >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n ... predictions=[10, 9, 2.5, 6, 4],\n ... return_pvalue=True)\n >>> print(results['spearmanr'])\n -0.7\n >>> print(round(results['spearmanr_pvalue'], 2))\n 0.19\n"
__lowerCamelCase : List[Any] = R"\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'''] , )
    def _compute(self, predictions, references, return_pvalue=False):
        '''simple docstring'''
        results = spearmanr(references, predictions)
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 416
|
def factorial(num: int) -> int:
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
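    # Quick sanity check (illustrative values, chosen here):
    # 10! = 3628800 and its digit sum is 3 + 6 + 2 + 8 + 8 + 0 + 0 == 27.
    assert solution(10) == 27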
| 416
| 1
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: Any = ['''flax''']
def __init__( self : Optional[Any] ,*A_ : int ,**A_ : Optional[Any] ) -> Union[str, Any]:
requires_backends(self ,['flax'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : int ,*A_ : Any ,**A_ : List[Any] ) -> int:
requires_backends(cls ,['flax'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[str] ,*A_ : Optional[Any] ,**A_ : Union[str, Any] ) -> str:
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: Optional[int] = ['''flax''']
def __init__( self : Any ,*A_ : List[str] ,**A_ : Tuple ) -> Any:
requires_backends(self ,['flax'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Any ,*A_ : Tuple ,**A_ : str ) -> Optional[int]:
requires_backends(cls ,['flax'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[str] ,*A_ : List[Any] ,**A_ : List[str] ) -> int:
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: str = ['''flax''']
def __init__( self : int ,*A_ : List[Any] ,**A_ : int ) -> str:
requires_backends(self ,['flax'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Optional[Any] ,*A_ : str ,**A_ : List[str] ) -> Union[str, Any]:
requires_backends(cls ,['flax'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Any ,*A_ : List[str] ,**A_ : Any ) -> Optional[Any]:
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: List[str] = ['''flax''']
def __init__( self : Union[str, Any] ,*A_ : Tuple ,**A_ : List[Any] ) -> Tuple:
requires_backends(self ,['flax'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Any ,*A_ : str ,**A_ : Tuple ) -> Union[str, Any]:
requires_backends(cls ,['flax'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,*A_ : Dict ,**A_ : Union[str, Any] ) -> List[Any]:
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: str = ['''flax''']
def __init__( self : int ,*A_ : Union[str, Any] ,**A_ : List[Any] ) -> List[Any]:
requires_backends(self ,['flax'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any] ,*A_ : int ,**A_ : List[str] ) -> List[str]:
requires_backends(cls ,['flax'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Tuple ,*A_ : List[Any] ,**A_ : Optional[Any] ) -> Tuple:
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: List[str] = ['''flax''']
def __init__( self : int ,*A_ : Tuple ,**A_ : str ) -> Union[str, Any]:
requires_backends(self ,['flax'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,*A_ : Optional[Any] ,**A_ : Optional[int] ) -> Tuple:
requires_backends(cls ,['flax'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : int ,*A_ : Tuple ,**A_ : int ) -> Optional[Any]:
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: Dict = ['''flax''']
def __init__( self : Any ,*A_ : List[str] ,**A_ : Union[str, Any] ) -> Tuple:
requires_backends(self ,['flax'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Any ,*A_ : Optional[int] ,**A_ : Any ) -> Dict:
requires_backends(cls ,['flax'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Optional[Any] ,*A_ : Dict ,**A_ : Optional[Any] ) -> Dict:
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: List[str] = ['''flax''']
def __init__( self : Optional[int] ,*A_ : Optional[int] ,**A_ : Dict ) -> List[str]:
requires_backends(self ,['flax'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : int ,*A_ : Tuple ,**A_ : str ) -> Optional[int]:
requires_backends(cls ,['flax'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Any ,*A_ : List[str] ,**A_ : Dict ) -> Union[str, Any]:
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: Dict = ['''flax''']
def __init__( self : List[str] ,*A_ : Optional[Any] ,**A_ : Dict ) -> Tuple:
requires_backends(self ,['flax'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Tuple ,*A_ : List[str] ,**A_ : Tuple ) -> Optional[int]:
requires_backends(cls ,['flax'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : str ,*A_ : Union[str, Any] ,**A_ : Any ) -> Optional[int]:
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: Any = ['''flax''']
def __init__( self : Optional[Any] ,*A_ : List[Any] ,**A_ : Optional[int] ) -> Any:
requires_backends(self ,['flax'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Dict ,*A_ : str ,**A_ : Union[str, Any] ) -> Dict:
requires_backends(cls ,['flax'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Any ,*A_ : Tuple ,**A_ : Any ) -> Dict:
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: List[str] = ['''flax''']
def __init__( self : Optional[Any] ,*A_ : List[Any] ,**A_ : int ) -> List[Any]:
requires_backends(self ,['flax'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : int ,*A_ : int ,**A_ : Union[str, Any] ) -> Tuple:
requires_backends(cls ,['flax'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Dict ,*A_ : int ,**A_ : Optional[Any] ) -> Optional[int]:
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: str = ['''flax''']
def __init__( self : List[Any] ,*A_ : List[str] ,**A_ : List[str] ) -> str:
requires_backends(self ,['flax'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Dict ,*A_ : List[str] ,**A_ : Union[str, Any] ) -> Union[str, Any]:
requires_backends(cls ,['flax'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any] ,*A_ : Any ,**A_ : List[str] ) -> Dict:
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: Optional[int] = ['''flax''']
def __init__( self : List[str] ,*A_ : List[Any] ,**A_ : Dict ) -> Dict:
requires_backends(self ,['flax'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Dict ,*A_ : Optional[Any] ,**A_ : List[str] ) -> Any:
requires_backends(cls ,['flax'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : int ,*A_ : Tuple ,**A_ : Any ) -> Dict:
requires_backends(cls ,['flax'] )
| 22
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spm_char.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/speecht5_asr": "https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model",
        "microsoft/speecht5_tts": "https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model",
        "microsoft/speecht5_vc": "https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/speecht5_asr": 1024,
    "microsoft/speecht5_tts": 1024,
    "microsoft/speecht5_vc": 1024,
}


class SpeechT5Tokenizer(PreTrainedTokenizer):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        suffix_ones = [1]
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + suffix_ones
        return ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
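# Minimal usage sketch (added for clarity; "microsoft/speecht5_asr" is one of the
# checkpoints in the pretrained map above):
#
#   tokenizer = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_asr")
#   ids = tokenizer("hello world")["input_ids"]  # character-level pieces plus </s>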
| 22
| 1
|
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json",
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
_snake_case : str = """data2vec-audio"""
def __init__( self :List[Any] , lowerCamelCase__ :List[str]=32 , lowerCamelCase__ :Optional[Any]=7_68 , lowerCamelCase__ :int=12 , lowerCamelCase__ :List[Any]=12 , lowerCamelCase__ :List[str]=30_72 , lowerCamelCase__ :int="gelu" , lowerCamelCase__ :Tuple=0.1 , lowerCamelCase__ :List[str]=0.1 , lowerCamelCase__ :Dict=0.1 , lowerCamelCase__ :Optional[Any]=0.0 , lowerCamelCase__ :Any=0.1 , lowerCamelCase__ :Optional[int]=0.1 , lowerCamelCase__ :Dict=0.02 , lowerCamelCase__ :List[str]=1e-5 , lowerCamelCase__ :Union[str, Any]="gelu" , lowerCamelCase__ :Optional[Any]=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , lowerCamelCase__ :int=(5, 2, 2, 2, 2, 2, 2) , lowerCamelCase__ :Dict=(10, 3, 3, 3, 3, 2, 2) , lowerCamelCase__ :List[Any]=False , lowerCamelCase__ :List[Any]=16 , lowerCamelCase__ :Union[str, Any]=19 , lowerCamelCase__ :int=5 , lowerCamelCase__ :Dict=0.05 , lowerCamelCase__ :Union[str, Any]=10 , lowerCamelCase__ :Optional[int]=2 , lowerCamelCase__ :int=0.0 , lowerCamelCase__ :Dict=10 , lowerCamelCase__ :List[str]=0 , lowerCamelCase__ :Dict="sum" , lowerCamelCase__ :Optional[int]=False , lowerCamelCase__ :int=False , lowerCamelCase__ :Dict=2_56 , lowerCamelCase__ :Any=(5_12, 5_12, 5_12, 5_12, 15_00) , lowerCamelCase__ :int=(5, 3, 3, 1, 1) , lowerCamelCase__ :Union[str, Any]=(1, 2, 3, 1, 1) , lowerCamelCase__ :List[Any]=5_12 , lowerCamelCase__ :str=0 , lowerCamelCase__ :str=1 , lowerCamelCase__ :Optional[int]=2 , lowerCamelCase__ :Optional[Any]=False , lowerCamelCase__ :Optional[Any]=3 , lowerCamelCase__ :List[str]=2 , lowerCamelCase__ :List[str]=3 , lowerCamelCase__ :List[str]=None , **lowerCamelCase__ :Any , ):
super().__init__(**lowerCamelCase__ , pad_token_id=lowerCamelCase__ , bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ )
UpperCamelCase__ :int = hidden_size
UpperCamelCase__ :int = feat_extract_activation
UpperCamelCase__ :str = list(lowerCamelCase__ )
UpperCamelCase__ :Tuple = list(lowerCamelCase__ )
UpperCamelCase__ :List[Any] = list(lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = conv_bias
UpperCamelCase__ :Any = num_conv_pos_embeddings
UpperCamelCase__ :Optional[Any] = num_conv_pos_embedding_groups
UpperCamelCase__ :Optional[Any] = conv_pos_kernel_size
UpperCamelCase__ :int = len(self.conv_dim )
UpperCamelCase__ :List[str] = num_hidden_layers
UpperCamelCase__ :List[Any] = intermediate_size
UpperCamelCase__ :Any = hidden_act
UpperCamelCase__ :List[Any] = num_attention_heads
UpperCamelCase__ :int = hidden_dropout
UpperCamelCase__ :Dict = attention_dropout
UpperCamelCase__ :Any = activation_dropout
UpperCamelCase__ :int = feat_proj_dropout
UpperCamelCase__ :List[Any] = final_dropout
UpperCamelCase__ :List[Any] = layerdrop
UpperCamelCase__ :List[Any] = layer_norm_eps
UpperCamelCase__ :Any = initializer_range
UpperCamelCase__ :Any = vocab_size
UpperCamelCase__ :str = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCamelCase__ :Tuple = mask_time_prob
UpperCamelCase__ :Union[str, Any] = mask_time_length
UpperCamelCase__ :Any = mask_time_min_masks
UpperCamelCase__ :Tuple = mask_feature_prob
UpperCamelCase__ :Optional[Any] = mask_feature_length
UpperCamelCase__ :Tuple = mask_feature_min_masks
# ctc loss
UpperCamelCase__ :List[str] = ctc_loss_reduction
UpperCamelCase__ :Optional[int] = ctc_zero_infinity
# adapter
UpperCamelCase__ :Optional[int] = add_adapter
UpperCamelCase__ :List[str] = adapter_kernel_size
UpperCamelCase__ :Dict = adapter_stride
UpperCamelCase__ :Optional[Any] = num_adapter_layers
UpperCamelCase__ :Union[str, Any] = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
UpperCamelCase__ :Any = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
UpperCamelCase__ :List[str] = list(lowerCamelCase__ )
UpperCamelCase__ :int = list(lowerCamelCase__ )
UpperCamelCase__ :List[str] = list(lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = xvector_output_dim
@property
    def inputs_to_logits_ratio(self):
return math.prod(self.conv_stride )
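# Worked check of the property above (defaults from the signature): conv_stride is
# (5, 2, 2, 2, 2, 2, 2), so inputs_to_logits_ratio == 5 * 2**6 == 320 raw audio
# samples per encoder output frame.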
| 45
|
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 2
| 0
|
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
_SCREAMING_SNAKE_CASE : List[Any] = pytest.mark.integration
@require_faiss
class IndexableDatasetTest(TestCase):
    def _create_dummy_dataset(self) -> Dataset:
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset
def UpperCamelCase ( self : Tuple ) -> Any:
import faiss
        dset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.floataa)}, with_indices=True, keep_in_memory=True
        )
        dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.floataa))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
dset.drop_index('vecs' )
def UpperCamelCase ( self : int ) -> List[str]:
import faiss
        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            batch_size=100,
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.floataa))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
def UpperCamelCase ( self : List[Any] ) -> Union[str, Any]:
import faiss
        dset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index("vecs", tmp_file.name)
            dset.load_faiss_index("vecs2", tmp_file.name)
        os.unlink(tmp_file.name)

        scores, examples = dset.get_nearest_examples("vecs2", np.ones(5, dtype=np.floataa))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
def UpperCamelCase ( self : Union[str, Any] ) -> Optional[int]:
        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs"
        )
        dset.drop_index("vecs")
        self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, "vecs2", np.ones(5, dtype=np.floataa)))
def UpperCamelCase ( self : Optional[Any] ) -> int:
from elasticsearch import Elasticsearch
lowerCamelCase_ = self._create_dummy_dataset()
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
lowerCamelCase_ = {"acknowledged": True}
mocked_bulk.return_value([(True, None)] * 30 )
lowerCamelCase_ = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
lowerCamelCase_ = Elasticsearch()
dset.add_elasticsearch_index('filename' , es_client=__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = dset.get_nearest_examples('filename' , 'my_name-train_29' )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
@require_faiss
class FaissIndexTest(TestCase):
    def test_flat_ip(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)

        # add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal, 10)

        # single query
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)

        # batched queries
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)

    def test_factory(self):
        import faiss

        index = FaissIndex(string_factory='Flat')
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        index = FaissIndex(string_factory='LSH')
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        with self.assertRaises(ValueError):
            _ = FaissIndex(string_factory='Flat', custom_index=faiss.IndexFlat(5))

    def test_custom(self):
        import faiss

        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)

    def test_serialization(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.float32))

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)

        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)
@require_faiss
def test_serialization_fs(mockfs):
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))

    index_name = 'index.faiss'
    path = f'mock://{index_name}'
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)

    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class ElasticSearchIndexTest(TestCase):
    def test_elasticsearch(self):
        from elasticsearch import Elasticsearch

        with patch('elasticsearch.Elasticsearch.search') as mocked_search, patch(
            'elasticsearch.client.IndicesClient.create') as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk') as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {'acknowledged': True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value([(True, None)] * 3)
            index.add_documents(['foo', 'bar', 'foobar'])

            # single query
            query = 'foo'
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # single query with timeout
            query = 'foo'
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # batched queries
            queries = ['foo', 'bar', 'foobar']
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)

            # batched queries with timeout
            queries = ['foo', 'bar', 'foobar']
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
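

# Minimal standalone sketch of the FaissIndex API exercised above (assumes a local
# faiss installation; vectors are illustrative only):
#
#     import numpy as np
#     from datasets.search import FaissIndex
#
#     index = FaissIndex(string_factory="Flat")        # exact L2 index
#     index.add_vectors(np.eye(5, dtype=np.float32))   # five one-hot "documents"
#     scores, indices = index.search(np.eye(5, dtype=np.float32)[2])
#     assert indices[0] == 2                           # nearest neighbour is the vector itself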
| 706
|
"""simple docstring"""
import unittest
from knapsack import greedy_knapsack as kp
class TestClass(unittest.TestCase):
    def test_sorted(self):
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        # a negative max_weight (e.g. -15) must be rejected
        self.assertRaisesRegex(ValueError, 'max_weight must greater than zero.')

    def test_negative_weight_value(self):
        # a negative entry in the weight list must be rejected
        self.assertRaisesRegex(ValueError, 'Weight can not be negative.')

    def test_negative_profit_value(self):
        # a negative entry in the profit list must be rejected
        self.assertRaisesRegex(ValueError, 'Profit can not be negative.')

    def test_null_max_weight(self):
        # a null max_weight must be rejected
        self.assertRaisesRegex(ValueError, 'max_weight must greater than zero.')

    def test_unequal_list_length(self):
        # profit and weight lists of different lengths must be rejected
        self.assertRaisesRegex(
            IndexError, 'The length of profit and weight must be same.')
if __name__ == "__main__":
unittest.main()
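

# Standalone sketch of the call under test (values copied from test_sorted; the
# greedy strategy packs items by profit/weight ratio until max_weight is reached):
#
#     from knapsack import greedy_knapsack as kp
#     kp.calc_profit([10, 20, 30, 40, 50, 60], [2, 4, 6, 8, 10, 12], 100)  # -> 210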
| 137
| 0
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class ChineseCLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            '[UNK]',
            '[CLS]',
            '[SEP]',
            '[PAD]',
            '[MASK]',
            '的',
            '价',
            '格',
            '是',
            '15',
            '便',
            'alex',
            '##andra',
            ',',
            '。',
            '-',
            't',
            'shirt',
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))

        image_processor_map = {
            'do_resize': True,
            'size': {'height': 224, 'width': 224},
            'do_center_crop': True,
            'crop_size': {'height': 18, 'width': 18},
            'do_normalize': True,
            'image_mean': [0.48145466, 0.4578275, 0.40821073],
            'image_std': [0.26862954, 0.26130258, 0.27577711],
            'do_convert_rgb': True,
        }
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, 'w', encoding='utf-8') as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ChineseCLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ChineseCLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(cls_token='(CLS)', sep_token='(SEP)')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname, cls_token='(CLS)', sep_token='(SEP)', do_normalize=False)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ChineseCLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors='np')
        input_processor = processor(images=image_input, return_tensors='np')

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'Alexandra,T-shirt的价格是15便士。'

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'Alexandra,T-shirt的价格是15便士。'
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'Alexandra,T-shirt的价格是15便士。'
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
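

# Condensed usage sketch of the processor under test (the tokenizer/image processor
# are the dummy fixtures created in setUp; a real checkpoint would normally be
# loaded with ChineseCLIPProcessor.from_pretrained instead):
#
#     processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
#     inputs = processor(text="Alexandra,T-shirt的价格是15便士。", images=image, return_tensors="pt")
#     # inputs -> input_ids / token_type_ids / attention_mask / pixel_values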
| 505
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default='summarization', metadata={'include_in_asdict_even_if_is_default': True})
    input_schema: ClassVar[Features] = Features({'text': Value('string')})
    label_schema: ClassVar[Features] = Features({'summary': Value('string')})
    text_column: str = 'text'
    summary_column: str = 'summary'

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: 'text', self.summary_column: 'summary'}
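

# Usage sketch with hypothetical column names:
#
#     task = Summarization(text_column="article", summary_column="highlights")
#     task.column_mapping  # {"article": "text", "highlights": "summary"}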
| 505
| 1
|
"""simple docstring"""
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]

WEEK_DAY_NAMES = {
0: '''Sunday''',
1: '''Monday''',
2: '''Tuesday''',
3: '''Wednesday''',
4: '''Thursday''',
5: '''Friday''',
6: '''Saturday''',
}
def get_week_day(year: int, month: int, day: int) -> str:
    """Returns the week-day name of a given date.

    >>> get_week_day(2020, 10, 24)
    'Saturday'
    """
    # minimal input checks:
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"

    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
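
# Quick sanity checks of the implementation above (dates verified against a
# standard calendar):
if __name__ == "__main__":
    assert get_week_day(2000, 1, 1) == "Saturday"
    assert get_week_day(2023, 1, 1) == "Sunday"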
| 709
|
"""simple docstring"""
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        out_indices=[0, 1, 2, 3],
    ):
        self.parent = parent
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels

        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return BeitConfig(
            vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, out_indices=self.out_indices)
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = BeitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels, pixel_labels):
        model = BeitForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2))
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': BeitModel,
            'image-classification': BeitForImageClassification,
            'image-segmentation': BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='BEiT does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if (
                model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                # we skip lambda parameters as these require special initial values
                # determined by config.layer_scale_init_value
                if 'lambda' in name:
                    continue
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized")

    @slow
    def test_model_from_pretrained(self):
        for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BeitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image


@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224') if is_vision_available() else None

    @slow
    def test_inference_masked_image_modeling_head(self):
        model = BeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k').to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors='pt').pixel_values.to(torch_device)

        # prepare bool_masked_pos
        bool_masked_pos = torch.ones((1, 196), dtype=torch.bool).to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 196, 8192))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]).to(torch_device)
        self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))
    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = BeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224').to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.2385, -1.0987, -1.0108]).to(torch_device)
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = BeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k').to(
            torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21841))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([1.6881, -0.2787, 0.5901]).to(torch_device)
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
    @slow
    def test_inference_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640')
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset('hf-internal-testing/fixtures_ade20k', split='test')
        image = Image.open(ds[0]['file'])
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 150, 160, 160))
        self.assertEqual(logits.shape, expected_shape)

        is_pillow_less_than_9 = version.parse(PIL.__version__) < version.parse('9.0.0')

        if is_pillow_less_than_9:
            expected_slice = torch.tensor(
                [
                    [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
                    [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
                    [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
                ], device=torch_device)
        else:
            expected_slice = torch.tensor(
                [
                    [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
                    [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
                    [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
                ], device=torch_device)
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640')
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset('hf-internal-testing/fixtures_ade20k', split='test')
        image = Image.open(ds[0]['file'])
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((160, 160))
        self.assertEqual(segmentation[0].shape, expected_shape)
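

# Condensed inference sketch mirroring the integration tests above (downloads the
# checkpoint; the image path is a placeholder):
#
#     processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")
#     model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")
#     inputs = processor(images=Image.open("cat.png"), return_tensors="pt")
#     with torch.no_grad():
#         logits = model(**inputs).logits
#     predicted_class = model.config.id2label[logits.argmax(-1).item()]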
| 28
| 0
|
"""simple docstring"""
from itertools import count
def solution(min_block_length: int = 50) -> int:
    """Returns the least row length n for which the fill-count function first
    exceeds one million, given a minimum block length."""
    fill_count_functions = [1] * min_block_length

    for n in count(min_block_length):
        fill_count_functions.append(1)

        # count the ways to place a block of each admissible length at each start
        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]

            fill_count_functions[n] += 1

        if fill_count_functions[n] > 1_00_00_00:
            break

    return n
if __name__ == "__main__":
print(F'''{solution() = }''')
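
# Per the Project Euler 115 statement, F(3, 30) = 1,089,155 is the first fill-count
# to exceed one million (taken from the problem statement, not re-derived here), so:
#
#     assert solution(3) == 30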
| 277
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about a document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")

        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt").input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device), decoder_input_ids=inputs["decoder_input_ids"].to(self.device), max_length=self.model.decoder.config.max_position_embeddings, early_stopping=True, pad_token_id=self.pre_processor.tokenizer.pad_token_id, eos_token_id=self.pre_processor.tokenizer.eos_token_id, use_cache=True, num_beams=1, bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]], return_dict_in_generate=True, ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)

        return sequence["answer"]
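

# Hedged usage sketch (requires downloading the Donut checkpoint; the file name is
# a placeholder). PipelineTool instances are callable, so:
#
#     from PIL import Image
#     tool = DocumentQuestionAnsweringTool()
#     answer = tool(document=Image.open("invoice.png"), question="What is the total amount?")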
| 429
| 0
|
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs


def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, 'schedule.bin')
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
@require_torch
class OptimizationTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)
    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)

    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w], lr=1e-2, eps=(1e-30, 1e-3), clip_threshold=1.0, decay_rate=-0.8, beta1=None, weight_decay=0.0, relative_step=False, scale_parameter=False, warmup_init=False, )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
@require_torch
class ScheduleInitTest(unittest.TestCase):
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)
    def test_schedulers(self):
        common_kwargs = {'num_warmup_steps': 2, 'num_training_steps': 10}
        # schedulers dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{'num_warmup_steps': 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, 'num_cycles': 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, 'power': 2.0, 'lr_end': 1E-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{'num_warmup_steps': 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1, expected_learning_rates, tol=1e-2, msg=f'failed for {scheduler_func} in normal scheduler')

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f'failed for {scheduler_func} in save and reload')
class LambdaScheduleWrapper:
    """Wraps each lr_lambda so that the resulting schedule stays picklable."""

    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
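

# Minimal standalone sketch of the scheduler API the tests above exercise (values
# are illustrative):
#
#     from torch import nn
#     from transformers import AdamW, get_linear_schedule_with_warmup
#
#     model = nn.Linear(4, 4)
#     optimizer = AdamW(model.parameters(), lr=1e-3)
#     scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=10, num_training_steps=100)
#     for _ in range(100):
#         optimizer.step()   # lr warms up linearly for 10 steps, then decays linearly to 0
#         scheduler.step()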
| 707
|
"""simple docstring"""
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    def __init__(self, path_or_paths, split=None, features=None, cache_dir=None, keep_in_memory=False, streaming=False, field=None, num_proc=None, **kwargs):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs)
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, field=field, **kwargs)

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc, )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
class JsonDatasetWriter:
    def __init__(self, dataset, path_or_buf, batch_size=None, num_proc=None, **to_json_kwargs):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f'num_proc {num_proc} must be an integer > 0.')

        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = 'utf-8'
        self.to_json_kwargs = to_json_kwargs

    def write(self):
        _ = self.to_json_kwargs.pop('path_or_buf', None)
        orient = self.to_json_kwargs.pop('orient', 'records')
        lines = self.to_json_kwargs.pop('lines', True if orient == 'records' else False)
        index = self.to_json_kwargs.pop('index', False if orient in ['split', 'table'] else True)
        compression = self.to_json_kwargs.pop('compression', None)

        if compression not in [None, 'infer', 'gzip', 'bz2', 'xz']:
            raise NotImplementedError(f'`datasets` currently does not support {compression} compression')

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, 'wb', compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f'The compression parameter is not supported when writing to a buffer, but compression={compression}'
                    ' was passed. Please provide a local path instead.')
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        return written

    def _batch_json(self, args):
        offset, orient, lines, index, to_json_kwargs = args

        batch = query_table(
            table=self.dataset.data, key=slice(offset, offset + self.batch_size), indices=self.dataset._indices, )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs)
        if not json_str.endswith('\n'):
            json_str += '\n'
        return json_str.encode(self.encoding)

    def _write(self, file_obj, orient, lines, index, **to_json_kwargs):
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size), unit='ba', disable=not logging.is_progress_bar_enabled(), desc='Creating json from Arrow format', ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json, [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)], ), total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size, unit='ba', disable=not logging.is_progress_bar_enabled(), desc='Creating json from Arrow format', ):
                    written += file_obj.write(json_str)
        return written
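

# Round-trip sketch of the reader/writer pair above (file name is illustrative):
#
#     from datasets import Dataset
#     ds = Dataset.from_dict({"a": [1, 2, 3]})
#     JsonDatasetWriter(ds, "out.jsonl").write()
#     ds2 = JsonDatasetReader("out.jsonl").read()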
| 263
| 0
|
'''simple docstring'''
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class CustomTokenizerFast(BertTokenizerFast):
    slow_tokenizer_class = CustomTokenizer
    pass
| 396
|
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {'LayoutLMv2Config', 'LayoutLMv3Config'}
@is_pipeline_test
class ZeroShotClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    def get_test_pipeline(self, model, tokenizer, processor):
        classifier = ZeroShotClassificationPipeline(
            model=model, tokenizer=tokenizer, candidate_labels=['polics', 'health'])
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
    def run_pipeline_test(self, classifier, _):
        outputs = classifier('Who are you voting for in 2020?', candidate_labels='politics')
        self.assertEqual(outputs, {'sequence': ANY(str), 'labels': [ANY(str)], 'scores': [ANY(float)]})

        # No kwarg
        outputs = classifier('Who are you voting for in 2020?', ['politics'])
        self.assertEqual(outputs, {'sequence': ANY(str), 'labels': [ANY(str)], 'scores': [ANY(float)]})

        outputs = classifier('Who are you voting for in 2020?', candidate_labels=['politics'])
        self.assertEqual(outputs, {'sequence': ANY(str), 'labels': [ANY(str)], 'scores': [ANY(float)]})

        outputs = classifier('Who are you voting for in 2020?', candidate_labels='politics, public health')
        self.assertEqual(
            outputs, {'sequence': ANY(str), 'labels': [ANY(str), ANY(str)], 'scores': [ANY(float), ANY(float)]})
        self.assertAlmostEqual(sum(nested_simplify(outputs['scores'])), 1.0)

        outputs = classifier('Who are you voting for in 2020?', candidate_labels=['politics', 'public health'])
        self.assertEqual(
            outputs, {'sequence': ANY(str), 'labels': [ANY(str), ANY(str)], 'scores': [ANY(float), ANY(float)]})
        self.assertAlmostEqual(sum(nested_simplify(outputs['scores'])), 1.0)

        outputs = classifier(
            'Who are you voting for in 2020?', candidate_labels='politics', hypothesis_template='This text is about {}')
        self.assertEqual(outputs, {'sequence': ANY(str), 'labels': [ANY(str)], 'scores': [ANY(float)]})

        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(['I am happy'], ['positive', 'negative'])
        self.assertEqual(
            outputs, [
                {'sequence': ANY(str), 'labels': [ANY(str), ANY(str)], 'scores': [ANY(float), ANY(float)]}
                for i in range(1)
            ], )
        outputs = classifier(['I am happy', 'I am sad'], ['positive', 'negative'])
        self.assertEqual(
            outputs, [
                {'sequence': ANY(str), 'labels': [ANY(str), ANY(str)], 'scores': [ANY(float), ANY(float)]}
                for i in range(2)
            ], )

        with self.assertRaises(ValueError):
            classifier('', candidate_labels='politics')

        with self.assertRaises(TypeError):
            classifier(None, candidate_labels='politics')

        with self.assertRaises(ValueError):
            classifier('Who are you voting for in 2020?', candidate_labels='')

        with self.assertRaises(TypeError):
            classifier('Who are you voting for in 2020?', candidate_labels=None)

        with self.assertRaises(ValueError):
            classifier(
                'Who are you voting for in 2020?', candidate_labels='politics', hypothesis_template='Not formatting template', )

        with self.assertRaises(AttributeError):
            classifier(
                'Who are you voting for in 2020?', candidate_labels='politics', hypothesis_template=None, )

        self.run_entailment_id(classifier)
    def run_entailment_id(self, zero_shot_classifier: Pipeline):
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id

        config.label2id = {'LABEL_0': 0, 'LABEL_1': 1, 'LABEL_2': 2}
        self.assertEqual(zero_shot_classifier.entailment_id, -1)

        config.label2id = {'entailment': 0, 'neutral': 1, 'contradiction': 2}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {'ENTAIL': 0, 'NON-ENTAIL': 1}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {'ENTAIL': 2, 'NEUTRAL': 1, 'CONTR': 0}
        self.assertEqual(zero_shot_classifier.entailment_id, 2)

        config.label2id = original_label2id
        self.assertEqual(original_entailment, zero_shot_classifier.entailment_id)
    @require_torch
    def test_truncation(self):
        zero_shot_classifier = pipeline(
            'zero-shot-classification', model='sshleifer/tiny-distilbert-base-cased-distilled-squad', framework='pt', )
        # There was a regression in 4.10 for this
        # Adding a test so we don't make the mistake again.
        # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
        zero_shot_classifier(
            'Who are you voting for in 2020?' * 100, candidate_labels=['politics', 'public health', 'science'])
    @require_torch
    def test_small_model_pt(self):
        zero_shot_classifier = pipeline(
            'zero-shot-classification', model='sshleifer/tiny-distilbert-base-cased-distilled-squad', framework='pt', )
        outputs = zero_shot_classifier(
            'Who are you voting for in 2020?', candidate_labels=['politics', 'public health', 'science'])

        self.assertEqual(
            nested_simplify(outputs), {
                'sequence': 'Who are you voting for in 2020?',
                'labels': ['science', 'public health', 'politics'],
                'scores': [0.333, 0.333, 0.333],
            }, )
    @require_tf
    def test_small_model_tf(self):
        zero_shot_classifier = pipeline(
            'zero-shot-classification', model='sshleifer/tiny-distilbert-base-cased-distilled-squad', framework='tf', )
        outputs = zero_shot_classifier(
            'Who are you voting for in 2020?', candidate_labels=['politics', 'public health', 'science'])

        self.assertEqual(
            nested_simplify(outputs), {
                'sequence': 'Who are you voting for in 2020?',
                'labels': ['science', 'public health', 'politics'],
                'scores': [0.333, 0.333, 0.333],
            }, )
    @slow
    @require_torch
    def test_large_model_pt(self):
        zero_shot_classifier = pipeline('zero-shot-classification', model='roberta-large-mnli', framework='pt')
        outputs = zero_shot_classifier(
            'Who are you voting for in 2020?', candidate_labels=['politics', 'public health', 'science'])

        self.assertEqual(
            nested_simplify(outputs), {
                'sequence': 'Who are you voting for in 2020?',
                'labels': ['politics', 'public health', 'science'],
                'scores': [0.976, 0.015, 0.009],
            }, )
        outputs = zero_shot_classifier(
'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'
' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'
' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'
' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'
' machine translation tasks show these models to be superior in quality while being more parallelizable'
' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'
' English-to-German translation task, improving over the existing best results, including ensembles by'
' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'
' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'
' fraction of the training costs of the best models from the literature. We show that the Transformer'
' generalizes well to other tasks by applying it successfully to English constituency parsing both with'
            ' large and limited training data.', candidate_labels=['machine learning', 'statistics', 'translation', 'vision'], multi_label=True, )
        self.assertEqual(
            nested_simplify(outputs), {
'sequence': (
'The dominant sequence transduction models are based on complex recurrent or convolutional neural'
' networks in an encoder-decoder configuration. The best performing models also connect the'
' encoder and decoder through an attention mechanism. We propose a new simple network'
' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'
' and convolutions entirely. Experiments on two machine translation tasks show these models to be'
' superior in quality while being more parallelizable and requiring significantly less time to'
' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'
' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'
' English-to-French translation task, our model establishes a new single-model state-of-the-art'
' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'
' costs of the best models from the literature. We show that the Transformer generalizes well to'
' other tasks by applying it successfully to English constituency parsing both with large and'
' limited training data.'
),
'labels': ['translation', 'machine learning', 'vision', 'statistics'],
'scores': [0.817, 0.713, 0.018, 0.018],
} ,)
    @slow
    @require_tf
    def test_large_model_tf(self):
        zero_shot_classifier = pipeline('zero-shot-classification', model='roberta-large-mnli', framework='tf')
        outputs = zero_shot_classifier(
            'Who are you voting for in 2020?', candidate_labels=['politics', 'public health', 'science'])

        self.assertEqual(
            nested_simplify(outputs), {
                'sequence': 'Who are you voting for in 2020?',
                'labels': ['politics', 'public health', 'science'],
                'scores': [0.976, 0.015, 0.009],
            }, )

        outputs = zero_shot_classifier(
'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'
' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'
' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'
' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'
' machine translation tasks show these models to be superior in quality while being more parallelizable'
' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'
' English-to-German translation task, improving over the existing best results, including ensembles by'
' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'
' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'
' fraction of the training costs of the best models from the literature. We show that the Transformer'
' generalizes well to other tasks by applying it successfully to English constituency parsing both with'
' large and limited training data.' ,candidate_labels=['machine learning', 'statistics', 'translation', 'vision'] ,multi_label=a ,)
self.assertEqual(
nested_simplify(a ) ,{
'sequence': (
'The dominant sequence transduction models are based on complex recurrent or convolutional neural'
' networks in an encoder-decoder configuration. The best performing models also connect the'
' encoder and decoder through an attention mechanism. We propose a new simple network'
' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'
' and convolutions entirely. Experiments on two machine translation tasks show these models to be'
' superior in quality while being more parallelizable and requiring significantly less time to'
' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'
' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'
' English-to-French translation task, our model establishes a new single-model state-of-the-art'
' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'
' costs of the best models from the literature. We show that the Transformer generalizes well to'
' other tasks by applying it successfully to English constituency parsing both with large and'
' limited training data.'
),
'labels': ['translation', 'machine learning', 'vision', 'statistics'],
'scores': [0.817, 0.713, 0.018, 0.018],
} ,)
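# --- Illustrative sketch (not part of the test suite above) ---
# With `multi_label=True`, NLI-based zero-shot pipelines typically score each
# candidate label independently: a softmax is taken over that label's
# [contradiction, entailment] logits and P(entailment) is kept, so scores need
# not sum to 1 (as in the [0.817, 0.713, ...] expectations above). The logit
# values below are made up for illustration only.
import numpy as np

def multi_label_scores(contradiction_logits, entailment_logits):
    # softmax over (contradiction, entailment) per label; keep P(entailment)
    logits = np.stack([contradiction_logits, entailment_logits], axis=-1)
    exp = np.exp(logits - logits.max(axis=-1, keepdims=True))
    probs = exp / exp.sum(axis=-1, keepdims=True)
    return probs[..., 1]

print(multi_label_scores(np.array([-1.5, -0.9, 4.0, 4.0]), np.array([1.4, 1.3, -2.0, -2.0])))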
| 396
| 1
|
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
MODEL_TYPE = '''bart'''
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True )
def load_models( ):
    '''simple docstring'''
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' )
        qar_model = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' )
        qar_model = qar_model.eval()
    else:
        qar_tokenizer , qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' )
        sas_model = AutoModelForSeq2SeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' )
        save_dict = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' )
        sas_model.load_state_dict(save_dict['''model'''] )
        sas_model = sas_model.eval()
    else:
        sas_tokenizer , sas_model = make_qa_s2s_model(
            model_name='''t5-small''' , from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''' , device='''cuda:0''' )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True )
def load_indexes( ):
    '''simple docstring'''
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wikiaab_passages = datasets.load_dataset(path='''wiki_snippets''' , name='''wiki40b_en_100_0''' )['''train''']
        wikiaab_passage_reps = np.memmap(
            '''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''' , dtype='''float32''' , mode='''r''' , shape=(wikiaab_passages.num_rows, 1_28) , )
        wikiaab_index_flat = faiss.IndexFlatIP(1_28 )
        wikiaab_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res , 1 , wikiaab_index_flat )
        wikiaab_gpu_index_flat.add(wikiaab_passage_reps )  # TODO fix for larger GPU
    else:
        wikiaab_passages , wikiaab_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] )
    return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True )
def load_train_data( ):
    '''simple docstring'''
    elia = datasets.load_dataset('''eli5''' , name='''LFQA_reddit''' )
    elia_train = elia['''train_eli5''']
    elia_train_q_reps = np.memmap(
        '''eli5_questions_reps.dat''' , dtype='''float32''' , mode='''r''' , shape=(elia_train.num_rows, 1_28) )
    eli5_train_q_index = faiss.IndexFlatIP(1_28 )
    eli5_train_q_index.add(elia_train_q_reps )
    return (elia_train, eli5_train_q_index)
wikiaab_passages , wikiaab_gpu_index_flat , es_client = load_indexes()
qar_tokenizer , qar_model , sas_tokenizer , sas_model = load_models()
elia_train , eli5_train_q_index = load_train_data()
def find_nearest_training(question , n_results=10 ):
    '''simple docstring'''
    q_rep = embed_questions_for_retrieval([question] , qar_tokenizer , qar_model )
    D , I = eli5_train_q_index.search(q_rep , n_results )
    nn_examples = [elia_train[int(i )] for i in I[0]]
    return nn_examples
def make_support(question , source="wiki40b" , method="dense" , n_results=10 ):
    '''simple docstring'''
    if source == "none":
        support_doc , hit_lst = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), [])
    else:
        if method == "dense":
            support_doc , hit_lst = query_qa_dense_index(
                question , qar_model , qar_tokenizer , wikiaab_passages , wikiaab_gpu_index_flat , n_results )
        else:
            support_doc , hit_lst = query_es_index(
                question , es_client , index_name='''english_wiki40b_snippets_100w''' , n_results=n_results , )
    support_list = [
        (res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst
    ]
    question_doc = '''question: {} context: {}'''.format(question , support_doc )
    return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda x : None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda x : None),
    } )
def answer_question(question_doc , sas_model , sas_tokenizer , min_len=64 , max_len=2_56 , sampling=False , n_beams=2 , top_p=0.95 , temp=0.8 ):
    '''simple docstring'''
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc , sas_model , sas_tokenizer , num_answers=1 , num_beams=n_beams , min_len=min_len , max_len=max_len , do_sample=sampling , temp=temp , top_p=top_p , top_k=None , max_input_length=10_24 , device='''cuda:0''' , )[0]
    return (answer, support_list)
st.title('''Long Form Question Answering with ELI5''')
# Start sidebar
header_html = '''<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'''
header_full = '''
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class="img-container"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
''' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
description = '''
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
'''
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
'''Answer the question''',
'''View the retrieved document only''',
'''View the most similar ELI5 question and answer''',
'''Show me everything, please!''',
]
demo_options = st.sidebar.checkbox('''Demo options''')
if demo_options:
    action_st = st.sidebar.selectbox(
        '''''',
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        '''''',
        ['''Show full text of passages''', '''Show passage section titles'''],
        index=0,
    )
    show_passages = show_type == '''Show full text of passages'''
else:
    action = 3
    show_passages = True
retrieval_options = st.sidebar.checkbox('''Retrieval options''')
if retrieval_options:
    retriever_info = '''
    ### Information retriever options
    The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
    trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
    The answer is then generated by a sequence-to-sequence model which takes the question and retrieved document as input.
    '''
    st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox('''Which Wikipedia format should the model use?''', ['''wiki40b''', '''none'''])
    index_type = st.sidebar.selectbox('''Which Wikipedia indexer should the model use?''', ['''dense''', '''sparse''', '''mixed'''])
else:
    wiki_source = '''wiki40b'''
    index_type = '''dense'''
a : Union[str, Any] = '''beam'''
a : str = 2
a : Tuple = 64
a : Optional[int] = 256
a : int = None
a : Dict = None
a : List[str] = st.sidebar.checkbox('''Generation options''')
if generate_options:
a : List[Any] = '''
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder\'s output probabilities.
'''
st.sidebar.markdown(generate_info)
a : str = st.sidebar.selectbox('''Would you like to use beam search or sample an answer?''', ['''beam''', '''sampled'''])
a : List[Any] = st.sidebar.slider(
'''Minimum generation length''', min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
a : Tuple = st.sidebar.slider(
'''Maximum generation length''', min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
a : str = st.sidebar.slider('''Beam size''', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
a : List[Any] = st.sidebar.slider(
'''Nucleus sampling p''', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
a : Tuple = st.sidebar.slider(
'''Temperature''', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
a : int = None
# start main text
questions_list = [
    '''<MY QUESTION>''',
    '''How do people make chocolate?''',
    '''Why do we get a fever when we are sick?''',
    '''How can different animals perceive different colors?''',
    '''What is natural language processing?''',
    '''What\'s the best way to treat a sunburn?''',
    '''What exactly are vitamins ?''',
    '''How does nuclear energy provide electricity?''',
    '''What\'s the difference between viruses and bacteria?''',
    '''Why are flutes classified as woodwinds when most of them are made out of metal ?''',
    '''Why do people like drinking coffee even though it tastes so bad?''',
    '''What happens when wine ages? How does it make the wine taste better?''',
    '''If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?''',
    '''How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?''',
    '''How does New Zealand have so many large bird predators?''',
]
question_s = st.selectbox(
    '''What would you like to ask? ---- select <MY QUESTION> to enter a new query''',
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input('''Enter your question here:''', '''''')
else:
    question = question_s
if st.button('''Show me!'''):
    if action in [0, 1, 3]:
        if index_type == "mixed":
            question_doc , support_list_dense = make_support(question, source=wiki_source, method='''dense''', n_results=10)
            question_doc , support_list_sparse = make_support(question, source=wiki_source, method='''sparse''', n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = '''<P> ''' + ''' <P> '''.join([res[-1] for res in support_list])
        else:
            question_doc , support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
    if action in [0, 3]:
        answer , support_list = answer_question(
            question_doc,
            sas_model,
            sas_tokenizer,
            min_len=min_len,
            max_len=int(max_len),
            sampling=(sampled == '''sampled'''),
            n_beams=n_beams,
            top_p=top_p,
            temp=temp,
        )
st.markdown('''### The model generated answer is:''')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('''--- \n ### The model is drawing information from the following Wikipedia passages:''')
for i, res in enumerate(support_list):
            wiki_url = '''https://en.wikipedia.org/wiki/{}'''.format(res[0].replace(''' ''', '''_'''))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = '''[{}]({})'''.format(res[0], wiki_url)
            else:
                sec_list = sec_titles.split(''' & ''')
                sections = ''' & '''.join(
['''[{}]({}#{})'''.format(sec.strip(), wiki_url, sec.strip().replace(''' ''', '''_''')) for sec in sec_list]
)
st.markdown(
'''{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'''.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'''> <span style="font-family:arial; font-size:10pt;">''' + res[-1] + '''</span>''', unsafe_allow_html=True
)
if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
st.markdown(
'''--- \n ### The most similar question in the ELI5 training set was: \n\n {}'''.format(train_exple['''title'''])
)
        answers_st = [
'''{}. {}'''.format(i + 1, ''' \n'''.join([line.strip() for line in ans.split('''\n''') if line.strip() != '''''']))
for i, (ans, sc) in enumerate(zip(train_exple['''answers''']['''text'''], train_exple['''answers''']['''score''']))
if i == 0 or sc > 2
]
st.markdown('''##### Its answers were: \n\n {}'''.format('''\n'''.join(answers_st)))
disclaimer = '''
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
'''
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
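# --- Illustrative sketch (separate from the app above) ---
# A minimal, self-contained version of the "dense" retrieval step the sidebar
# text describes: max-inner-product search with a flat FAISS index. The 128-dim
# random vectors below stand in for the real RetriBERT question/passage embeddings.
import faiss
import numpy as np

dim = 128
passage_reps = np.random.rand(1000, dim).astype('float32')  # stand-in for wiki40b passage reps
question_rep = np.random.rand(1, dim).astype('float32')     # stand-in for an embedded question

index = faiss.IndexFlatIP(dim)                # exact inner-product index, as in load_indexes()
index.add(passage_reps)
scores, ids = index.search(question_rep, 10)  # top-10 passages by inner product
print(ids[0], scores[0])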
| 527
|
import unittest
from knapsack import knapsack as k
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
    def test_base_case(self ):
        '''simple docstring'''
        cap = 0
        val = [0]
        w = [0]
        c = len(val )
        self.assertEqual(k.knapsack(cap , w , val , c ) , 0 )
        val = [60]
        w = [10]
        c = len(val )
        self.assertEqual(k.knapsack(cap , w , val , c ) , 0 )
    def test_easy_case(self ):
        '''simple docstring'''
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val )
        self.assertEqual(k.knapsack(cap , w , val , c ) , 5 )
    def test_knapsack(self ):
        '''simple docstring'''
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val )
        self.assertEqual(k.knapsack(cap , w , val , c ) , 220 )
if __name__ == "__main__":
unittest.main()
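# --- Illustrative sketch (the real `knapsack` module may differ) ---
# A recursive 0/1 knapsack matching the call signature the tests above assume:
# knapsack(capacity, weights, values, counter), where `counter` is the number of
# items still under consideration.
def knapsack(cap, w, val, c):
    if c == 0 or cap == 0:  # no items left or no capacity left
        return 0
    if w[c - 1] > cap:      # item c-1 does not fit: skip it
        return knapsack(cap, w, val, c - 1)
    # otherwise, best of taking or skipping item c-1
    return max(
        val[c - 1] + knapsack(cap - w[c - 1], w, val, c - 1),
        knapsack(cap, w, val, c - 1),
    )

assert knapsack(50, [10, 20, 30], [60, 100, 120], 3) == 220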
| 527
| 1
|
'''simple docstring'''
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x: int ) -> int:
    '''simple docstring'''
    return x + 2
class lowerCAmelCase ( unittest.TestCase):
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case = '''x = 3'''
__snake_case = {}
__snake_case = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE )
assert result == 3
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {'''x''': 3} )
__snake_case = '''x = y'''
__snake_case = {'''y''': 5}
__snake_case = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {'''x''': 5, '''y''': 5} )
def lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
__snake_case = '''y = add_two(x)'''
__snake_case = {'''x''': 3}
__snake_case = evaluate(__SCREAMING_SNAKE_CASE , {'''add_two''': add_two} , state=__SCREAMING_SNAKE_CASE )
assert result == 5
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {'''x''': 3, '''y''': 5} )
# Won't work without the tool
with CaptureStdout() as out:
__snake_case = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE )
assert result is None
assert "tried to execute add_two" in out.out
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case = '''x = 3'''
__snake_case = {}
__snake_case = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE )
assert result == 3
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {'''x''': 3} )
def lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
__snake_case = '''test_dict = {\'x\': x, \'y\': add_two(x)}'''
__snake_case = {'''x''': 3}
__snake_case = evaluate(__SCREAMING_SNAKE_CASE , {'''add_two''': add_two} , state=__SCREAMING_SNAKE_CASE )
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {'''x''': 3, '''y''': 5} )
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {'''x''': 3, '''test_dict''': {'''x''': 3, '''y''': 5}} )
def lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
__snake_case = '''x = 3\ny = 5'''
__snake_case = {}
__snake_case = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {'''x''': 3, '''y''': 5} )
def lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
__snake_case = '''text = f\'This is x: {x}.\''''
__snake_case = {'''x''': 3}
__snake_case = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE )
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {'''x''': 3, '''text''': '''This is x: 3.'''} )
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case = '''if x <= 3:\n y = 2\nelse:\n y = 5'''
__snake_case = {'''x''': 3}
__snake_case = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE )
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {'''x''': 3, '''y''': 2} )
__snake_case = {'''x''': 8}
__snake_case = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {'''x''': 8, '''y''': 5} )
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case = '''test_list = [x, add_two(x)]'''
__snake_case = {'''x''': 3}
__snake_case = evaluate(__SCREAMING_SNAKE_CASE , {'''add_two''': add_two} , state=__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , [3, 5] )
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {'''x''': 3, '''test_list''': [3, 5]} )
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
__snake_case = '''y = x'''
__snake_case = {'''x''': 3}
__snake_case = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE )
assert result == 3
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {'''x''': 3, '''y''': 3} )
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case = '''test_list = [x, add_two(x)]\ntest_list[1]'''
__snake_case = {'''x''': 3}
__snake_case = evaluate(__SCREAMING_SNAKE_CASE , {'''add_two''': add_two} , state=__SCREAMING_SNAKE_CASE )
assert result == 5
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {'''x''': 3, '''test_list''': [3, 5]} )
__snake_case = '''test_dict = {\'x\': x, \'y\': add_two(x)}\ntest_dict[\'y\']'''
__snake_case = {'''x''': 3}
__snake_case = evaluate(__SCREAMING_SNAKE_CASE , {'''add_two''': add_two} , state=__SCREAMING_SNAKE_CASE )
assert result == 5
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {'''x''': 3, '''test_dict''': {'''x''': 3, '''y''': 5}} )
def lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
__snake_case = '''x = 0\nfor i in range(3):\n x = i'''
__snake_case = {}
__snake_case = evaluate(__SCREAMING_SNAKE_CASE , {'''range''': range} , state=__SCREAMING_SNAKE_CASE )
assert result == 2
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {'''x''': 2, '''i''': 2} )
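# --- Illustrative sketch (a simplified stand-in, not the actual transformers
# interpreter) of the behaviour the tests above exercise: walk the AST of a small
# snippet, update `state` on assignments, and return the last evaluated value.
# Only names, constants, calls, and single-target assignments are handled.
import ast

def tiny_evaluate(code, tools, state):
    result = None
    for node in ast.parse(code).body:
        if isinstance(node, ast.Assign):
            result = _eval_expr(node.value, tools, state)
            state[node.targets[0].id] = result
        elif isinstance(node, ast.Expr):
            result = _eval_expr(node.value, tools, state)
    return result

def _eval_expr(node, tools, state):
    if isinstance(node, ast.Constant):
        return node.value
    if isinstance(node, ast.Name):
        return state[node.id] if node.id in state else tools[node.id]
    if isinstance(node, ast.Call):
        return tools[node.func.id](*[_eval_expr(a, tools, state) for a in node.args])
    raise ValueError(f'Unsupported node: {ast.dump(node)}')

_state = {'x': 3}
assert tiny_evaluate('y = add_two(x)', {'add_two': add_two}, _state) == 5
assert _state == {'x': 3, 'y': 5}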
| 24
|
'''simple docstring'''
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest( TokenizerTesterMixin , unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ) -> None:
        '''simple docstring'''
        super().setUp()
        tokenizer = BarthezTokenizerFast.from_pretrained('''moussaKam/mbarthez''' )
        tokenizer.save_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname , legacy_format=False )
        self.tokenizer = tokenizer
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
__snake_case = '''<pad>'''
__snake_case = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 10_1122 )
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 10_1122 )
@require_torch
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
__snake_case = [0, 57, 3018, 7_0307, 91, 2]
__snake_case = self.tokenizer(
__SCREAMING_SNAKE_CASE , max_length=len(__SCREAMING_SNAKE_CASE ) , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
__snake_case = batch.input_ids.tolist()[0]
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
__snake_case = self.get_tokenizer()
__snake_case = self.get_rust_tokenizer()
__snake_case = '''I was born in 92000, and this is falsé.'''
__snake_case = tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
__snake_case = rust_tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
__snake_case = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = self.get_rust_tokenizer()
__snake_case = tokenizer.encode(__SCREAMING_SNAKE_CASE )
__snake_case = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@slow
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
__snake_case = {'''input_ids''': [[0, 490, 1_4328, 4507, 354, 47, 4_3669, 95, 25, 7_8117, 2_0215, 1_9779, 190, 22, 400, 4, 3_5343, 8_0310, 603, 86, 2_4937, 105, 3_3438, 9_4762, 196, 3_9642, 7, 15, 1_5933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0534, 87, 25, 66, 3358, 196, 5_5289, 8, 8_2961, 81, 2204, 7_5203, 7, 15, 763, 1_2956, 216, 178, 1_4328, 9595, 1377, 6_9693, 7, 448, 7_1021, 196, 1_8106, 1437, 1_3974, 108, 9083, 4, 4_9315, 7, 39, 86, 1326, 2793, 4_6333, 4, 448, 196, 7_4588, 7, 4_9315, 7, 39, 21, 822, 3_8470, 74, 21, 6_6723, 6_2480, 8, 2_2050, 5, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
__snake_case = [
'''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '''
'''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''',
'''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '''
'''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '''
'''telles que la traduction et la synthèse de texte.''',
]
self.tokenizer_integration_test_util(
expected_encoding=__SCREAMING_SNAKE_CASE , model_name='''moussaKam/mbarthez''' , revision='''c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6''' , sequences=__SCREAMING_SNAKE_CASE , )
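# --- Usage sketch (requires network access to download the checkpoint) ---
# The tokenizer exercised above, applied outside the test harness; the French
# sentence is arbitrary sample input.
if __name__ == "__main__":
    tok = BarthezTokenizerFast.from_pretrained('''moussaKam/mbarthez''' )
    batch = tok(['''Le transformeur est un modèle d'apprentissage profond.'''] , padding=True , return_tensors='''pt''' )
    print(batch.input_ids.shape )  # (batch_size, sequence_length)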
| 24
| 1
|
"""simple docstring"""
import copy
import random
from transformers import CLIPTokenizer
class MultiTokenCLIPTokenizer( CLIPTokenizer ):
    def __init__( self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
        self.token_map = {}
    def try_adding_tokens( self , placeholder_token , *args , **kwargs ):
        num_added_tokens = super().add_tokens(placeholder_token , *args , **kwargs )
        if num_added_tokens == 0:
            raise ValueError(
                f"""The tokenizer already contains the token {placeholder_token}. Please pass a different"""
                ' `placeholder_token` that is not already in the tokenizer.' )
    def add_placeholder_tokens( self , placeholder_token , *args , num_vec_per_token=1 , **kwargs ):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token , *args , **kwargs )
            output.append(placeholder_token )
        else:
            output = []
            for i in range(num_vec_per_token ):
                ith_token = placeholder_token + f"""_{i}"""
                self.try_adding_tokens(ith_token , *args , **kwargs )
                output.append(ith_token )
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"""The tokenizer already has placeholder token {token} that can get confused with"""
                    f""" {placeholder_token}. Keep placeholder tokens independent.""" )
        self.token_map[placeholder_token] = output
    def replace_placeholder_tokens_in_text( self , text , vector_shuffle=False , prop_tokens_to_load=1.0 ):
        if isinstance(text , list ):
            output = []
            for i in range(len(text ) ):
                output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=vector_shuffle ) )
            return output
        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens ) * prop_tokens_to_load )]
                if vector_shuffle:
                    tokens = copy.copy(tokens )
                    random.shuffle(tokens )
                text = text.replace(placeholder_token , ' '.join(tokens ) )
        return text
    def __call__( self , text , *args , vector_shuffle=False , prop_tokens_to_load=1.0 , **kwargs ):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text , vector_shuffle=vector_shuffle , prop_tokens_to_load=prop_tokens_to_load ) , *args , **kwargs , )
    def encode( self , text , *args , vector_shuffle=False , prop_tokens_to_load=1.0 , **kwargs ):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text , vector_shuffle=vector_shuffle , prop_tokens_to_load=prop_tokens_to_load ) , *args , **kwargs , )
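# --- Illustrative sketch ---
# The multi-vector placeholder idea in isolation: one placeholder such as "<cat>"
# is expanded into several sub-tokens before tokenization, so a single concept can
# own multiple embedding vectors. Shown standalone to avoid a checkpoint download;
# the placeholder names are illustrative.
def expand_placeholders(text, token_map):
    for placeholder_token, tokens in token_map.items():
        if placeholder_token in text:
            text = text.replace(placeholder_token, ' '.join(tokens))
    return text

assert expand_placeholders(
    'a photo of <cat>', {'<cat>': ['<cat>_0', '<cat>_1', '<cat>_2']}
) == 'a photo of <cat>_0 <cat>_1 <cat>_2'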
| 714
|
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-ctx_encoder-single-nq-base''': 512,
'''facebook/dpr-ctx_encoder-multiset-base''': 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-question_encoder-single-nq-base''': 512,
'''facebook/dpr-question_encoder-multiset-base''': 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-reader-single-nq-base''': 512,
'''facebook/dpr-reader-multiset-base''': 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True},
}
class DPRContextEncoderTokenizerFast( BertTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer
class DPRQuestionEncoderTokenizerFast( BertTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
    '''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text''']
)
DPRReaderOutput = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits'''])
CUSTOM_DPR_READER_DOCSTRING = r'''
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `\'tf\'`: Return TensorFlow `tf.constant` objects.
- `\'pt\'`: Return PyTorch `torch.Tensor` objects.
- `\'np\'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer\'s default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
'''
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class CustomDPRReaderTokenizerMixin:
    def __call__( self , questions , titles = None , texts = None , padding = False , truncation = False , max_length = None , return_tensors = None , return_attention_mask = None , **kwargs , ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions , text_pair , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
        titles = titles if not isinstance(titles , str ) else [titles]
        texts = texts if not isinstance(texts , str ) else [texts]
        n_passages = len(titles )
        questions = questions if not isinstance(questions , str ) else [questions] * n_passages
        assert len(titles ) == len(
            texts ), f"""There should be as many titles than texts but got {len(titles )} titles and {len(texts )} texts."""
        encoded_question_and_titles = super().__call__(questions , titles , padding=False , truncation=False )['input_ids']
        encoded_texts = super().__call__(texts , add_special_tokens=False , padding=False , truncation=False )['input_ids']
        encoded_inputs = {
            'input_ids': [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles , encoded_texts )
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
            encoded_inputs['attention_mask'] = attention_mask
        return self.pad(encoded_inputs , padding=padding , max_length=max_length , return_tensors=return_tensors )
    def decode_best_spans( self , reader_input , reader_output , num_spans = 16 , max_answer_length = 64 , num_spans_per_passage = 4 , ):
        input_ids = reader_input['input_ids']
        start_logits , end_logits , relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits )
        sorted_docs = sorted(range(n_passages ) , reverse=True , key=relevance_logits.__getitem__ )
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id] )
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id , 2 ) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id )
            else:
                sequence_len = len(sequence_ids )
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=max_answer_length , top_spans=num_spans_per_passage , )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=doc_id , start_index=start_index , end_index=end_index , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
            if len(nbest_spans_predictions ) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans( self , start_logits , end_logits , max_answer_length , top_spans , ):
        scores = []
        for start_index, start_score in enumerate(start_logits ):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
                scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        scores = sorted(scores , key=lambda x : x[1] , reverse=True )
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"""Wrong span indices: [{start_index}:{end_index}]"""
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"""Span is too long: {length} > {max_answer_length}"""
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals ):
                continue
            chosen_span_intervals.append((start_index, end_index) )
            if len(chosen_span_intervals ) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class DPRReaderTokenizerFast( CustomDPRReaderTokenizerMixin , BertTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["""input_ids""", """attention_mask"""]
    slow_tokenizer_class = DPRReaderTokenizer
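# --- Usage sketch (requires network access to download the checkpoint) ---
# Builds the `[CLS] <question> [SEP] <title> [SEP] <text>` matrix described in the
# docstring above, one row per passage; the question and passages are toy examples.
if __name__ == "__main__":
    tokenizer = DPRReaderTokenizerFast.from_pretrained("""facebook/dpr-reader-single-nq-base""" )
    encoded = tokenizer(
        questions="""What does DPR stand for?""" ,
        titles=["""Dense Passage Retrieval""", """DPR"""] ,
        texts=["""DPR is a dense retrieval method...""", """DPR may refer to..."""] ,
        padding=True ,
        return_tensors="""pt""" ,
    )
    print(encoded["""input_ids"""].shape )  # (n_passages, sequence_length)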
| 494
| 0
|
def reverse_words(input_str: str ) -> str:
    """
    >>> reverse_words("I love Python")
    'Python love I'
    """
    return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 416
|
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> Tuple:
random.seed(snake_case_ )
np.random.seed(snake_case_ )
torch.manual_seed(snake_case_ )
torch.cuda.manual_seed_all(snake_case_ )
# ^^ safe to call this function even if cuda is not available
class a :
def __init__( self , __UpperCamelCase , __UpperCamelCase = 0.9999 , __UpperCamelCase = 0.0 , __UpperCamelCase = 0 , __UpperCamelCase = False , __UpperCamelCase = 1.0 , __UpperCamelCase = 2 / 3 , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , )-> int:
'''simple docstring'''
if isinstance(__UpperCamelCase , torch.nn.Module ):
A__ : str =(
'''Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. '''
'''Please pass the parameters of the module instead.'''
)
deprecate(
'''passing a `torch.nn.Module` to `ExponentialMovingAverage`''' , '''1.0.0''' , __UpperCamelCase , standard_warn=__UpperCamelCase , )
A__ : List[str] =parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
A__ : str =True
if kwargs.get('''max_value''' , __UpperCamelCase ) is not None:
A__ : List[str] ='''The `max_value` argument is deprecated. Please use `decay` instead.'''
deprecate('''max_value''' , '''1.0.0''' , __UpperCamelCase , standard_warn=__UpperCamelCase )
A__ : Union[str, Any] =kwargs['''max_value''']
if kwargs.get('''min_value''' , __UpperCamelCase ) is not None:
A__ : int ='''The `min_value` argument is deprecated. Please use `min_decay` instead.'''
deprecate('''min_value''' , '''1.0.0''' , __UpperCamelCase , standard_warn=__UpperCamelCase )
A__ : Union[str, Any] =kwargs['''min_value''']
A__ : List[str] =list(__UpperCamelCase )
A__ : Dict =[p.clone().detach() for p in parameters]
if kwargs.get('''device''' , __UpperCamelCase ) is not None:
A__ : Any ='''The `device` argument is deprecated. Please use `to` instead.'''
deprecate('''device''' , '''1.0.0''' , __UpperCamelCase , standard_warn=__UpperCamelCase )
self.to(device=kwargs['''device'''] )
A__ : List[Any] =None
A__ : Optional[int] =decay
A__ : int =min_decay
A__ : int =update_after_step
A__ : Optional[int] =use_ema_warmup
A__ : Tuple =inv_gamma
A__ : Union[str, Any] =power
A__ : Optional[Any] =0
A__ : Union[str, Any] =None # set in `step()`
A__ : Optional[int] =model_cls
A__ : Dict =model_config
@classmethod
def lowerCAmelCase_ ( cls , __UpperCamelCase , __UpperCamelCase )-> "EMAModel":
'''simple docstring'''
A__ , A__ : Dict =model_cls.load_config(__UpperCamelCase , return_unused_kwargs=__UpperCamelCase )
A__ : Tuple =model_cls.from_pretrained(__UpperCamelCase )
A__ : List[Any] =cls(model.parameters() , model_cls=__UpperCamelCase , model_config=model.config )
ema_model.load_state_dict(__UpperCamelCase )
return ema_model
def lowerCAmelCase_ ( self , __UpperCamelCase )-> List[str]:
'''simple docstring'''
if self.model_cls is None:
raise ValueError('''`save_pretrained` can only be used if `model_cls` was defined at __init__.''' )
if self.model_config is None:
raise ValueError('''`save_pretrained` can only be used if `model_config` was defined at __init__.''' )
A__ : Tuple =self.model_cls.from_config(self.model_config )
A__ : int =self.state_dict()
state_dict.pop('''shadow_params''' , __UpperCamelCase )
model.register_to_config(**__UpperCamelCase )
self.copy_to(model.parameters() )
model.save_pretrained(__UpperCamelCase )
    def get_decay( self , optimization_step )-> float:
        '''simple docstring'''
        step = max(0 , optimization_step - self.update_after_step - 1 )
        if step <= 0:
            return 0.0
        if self.use_ema_warmup:
            cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            cur_decay_value = (1 + step) / (10 + step)
        cur_decay_value = min(cur_decay_value , self.decay )
        # make sure decay is not smaller than min_decay
        cur_decay_value = max(cur_decay_value , self.min_decay )
        return cur_decay_value
@torch.no_grad()
def lowerCAmelCase_ ( self , __UpperCamelCase )-> Dict:
'''simple docstring'''
if isinstance(__UpperCamelCase , torch.nn.Module ):
A__ : str =(
'''Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. '''
'''Please pass the parameters of the module instead.'''
)
deprecate(
'''passing a `torch.nn.Module` to `ExponentialMovingAverage.step`''' , '''1.0.0''' , __UpperCamelCase , standard_warn=__UpperCamelCase , )
A__ : Union[str, Any] =parameters.parameters()
A__ : List[str] =list(__UpperCamelCase )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
A__ : Optional[Any] =self.get_decay(self.optimization_step )
A__ : Any =decay
A__ : List[Any] =1 - decay
A__ : Tuple =contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params , __UpperCamelCase ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
A__ : Optional[Any] =deepspeed.zero.GatheredParameters(__UpperCamelCase , modifier_rank=__UpperCamelCase )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(__UpperCamelCase )
def lowerCAmelCase_ ( self , __UpperCamelCase )-> None:
'''simple docstring'''
A__ : List[Any] =list(__UpperCamelCase )
for s_param, param in zip(self.shadow_params , __UpperCamelCase ):
param.data.copy_(s_param.to(param.device ).data )
def lowerCAmelCase_ ( self , __UpperCamelCase=None , __UpperCamelCase=None )-> None:
'''simple docstring'''
A__ : Union[str, Any] =[
p.to(device=__UpperCamelCase , dtype=__UpperCamelCase ) if p.is_floating_point() else p.to(device=__UpperCamelCase )
for p in self.shadow_params
]
def lowerCAmelCase_ ( self )-> dict:
'''simple docstring'''
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def lowerCAmelCase_ ( self , __UpperCamelCase )-> None:
'''simple docstring'''
A__ : List[Any] =[param.detach().cpu().clone() for param in parameters]
def lowerCAmelCase_ ( self , __UpperCamelCase )-> None:
'''simple docstring'''
if self.temp_stored_params is None:
raise RuntimeError('''This ExponentialMovingAverage has no `store()`ed weights ''' '''to `restore()`''' )
for c_param, param in zip(self.temp_stored_params , __UpperCamelCase ):
param.data.copy_(c_param.data )
# Better memory-wise.
A__ : List[str] =None
def lowerCAmelCase_ ( self , __UpperCamelCase )-> None:
'''simple docstring'''
A__ : List[str] =copy.deepcopy(__UpperCamelCase )
A__ : Any =state_dict.get('''decay''' , self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError('''Decay must be between 0 and 1''' )
A__ : Optional[int] =state_dict.get('''min_decay''' , self.min_decay )
if not isinstance(self.min_decay , __UpperCamelCase ):
raise ValueError('''Invalid min_decay''' )
A__ : List[Any] =state_dict.get('''optimization_step''' , self.optimization_step )
if not isinstance(self.optimization_step , __UpperCamelCase ):
raise ValueError('''Invalid optimization_step''' )
A__ : str =state_dict.get('''update_after_step''' , self.update_after_step )
if not isinstance(self.update_after_step , __UpperCamelCase ):
raise ValueError('''Invalid update_after_step''' )
A__ : int =state_dict.get('''use_ema_warmup''' , self.use_ema_warmup )
if not isinstance(self.use_ema_warmup , __UpperCamelCase ):
raise ValueError('''Invalid use_ema_warmup''' )
A__ : str =state_dict.get('''inv_gamma''' , self.inv_gamma )
if not isinstance(self.inv_gamma , (float, int) ):
raise ValueError('''Invalid inv_gamma''' )
A__ : List[Any] =state_dict.get('''power''' , self.power )
if not isinstance(self.power , (float, int) ):
raise ValueError('''Invalid power''' )
A__ : Union[str, Any] =state_dict.get('''shadow_params''' , __UpperCamelCase )
if shadow_params is not None:
A__ : str =shadow_params
if not isinstance(self.shadow_params , __UpperCamelCase ):
raise ValueError('''shadow_params must be a list''' )
if not all(isinstance(__UpperCamelCase , torch.Tensor ) for p in self.shadow_params ):
raise ValueError('''shadow_params must all be Tensors''' )
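# --- Worked example of the `get_decay` schedule above, computed standalone ---
# (constants as in __init__: decay=0.9999, min_decay=0.0, update_after_step=0,
# inv_gamma=1.0, power=2/3; printed values rounded).
def ema_decay(optimization_step , use_ema_warmup=False , inv_gamma=1.0 , power=2 / 3 , decay=0.9999 , min_decay=0.0 ):
    step = max(0 , optimization_step - 1 )  # update_after_step == 0
    if step <= 0:
        return 0.0
    if use_ema_warmup:
        cur = 1 - (1 + step / inv_gamma) ** -power
    else:
        cur = (1 + step) / (10 + step)
    return max(min(cur , decay ) , min_decay )

print(round(ema_decay(10 ) , 4 ) )                          # 0.5263  (default schedule)
print(round(ema_decay(10 , use_ema_warmup=True ) , 4 ) )    # 0.7846  (warmup ramps faster)
print(round(ema_decay(1000 , use_ema_warmup=True ) , 4 ) )  # 0.99    (1 - 1000 ** (-2/3))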
| 416
| 1
|
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"tokenizer_config_file": "tokenizer_config.json",
"merges_file": "merges.txt",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"
),
},
"tokenizer_config_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"
),
},
"merges_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"
),
},
}
BPE_TOKEN_END_OF_WORD = "</w>"
BPE_TOKEN_MERGES = "@@ "
def get_pairs(word ):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
# Speech2Text2 has no max input length
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/s2t-wav2vec2-large-en-de": 1_024}
class Speech2Text2Tokenizer(PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    def __init__( self , vocab_file , bos_token="<s>" , pad_token="<pad>" , eos_token="</s>" , unk_token="<unk>" , do_lower_case=False , merges_file=None , **kwargs , ):
        super().__init__(
            unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , do_lower_case=do_lower_case , **kwargs , )
        self.do_lower_case = do_lower_case
        with open(vocab_file , encoding='utf-8' ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        if merges_file is None:
            logger.info(f'''No merges file provided. {self.__class__.__name__} can only be used for decoding.''' )
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file , encoding='utf-8' ) as merges_handle:
                merges = merges_handle.read().split('\n' )[:-1]
            merges = [tuple(merge.split()[:2] ) for merge in merges]
            self.bpe_ranks = dict(zip(merges , range(len(merges ) ) ) )
            self.cache = {}
    @property
    def vocab_size( self ):
        return len(self.decoder )
    def get_vocab( self ):
        return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self , token ):
        word = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_END_OF_WORD,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float('inf' ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = ' '.join(word )
        if word == "\n " + BPE_TOKEN_MERGES:
            word = '\n' + BPE_TOKEN_MERGES
        if word.endswith(BPE_TOKEN_END_OF_WORD ):
            word = word.replace(BPE_TOKEN_END_OF_WORD , '' )
        word = word.replace(' ' , BPE_TOKEN_MERGES )
        self.cache[token] = word
        return word
    def _tokenize( self , text ):
        if self.bpe_ranks is None:
            raise ValueError(
                'This tokenizer was instantiated without a `merges.txt` file, so'
                ' that it can only be used for decoding, not for encoding. '
                'Make sure to provide `merges.txt` file at instantiation to enable '
                'encoding.' )
        if self.do_lower_case:
            text = text.lower()
        text = text.split()
        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token ).split(' ' ) ) )
        return split_tokens
    def _convert_token_to_id( self , token ):
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def _convert_id_to_token( self , index ):
        result = self.decoder.get(index , self.unk_token )
        return result
    def convert_tokens_to_string( self , tokens ):
        string = ' '.join(tokens )
        # make sure @@ tokens are concatenated
        string = ''.join(string.split(BPE_TOKEN_MERGES ) )
        return string
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        merges_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
        with open(vocab_file , 'w' , encoding='utf-8' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + '\n' )
        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)
        with open(merges_file , 'w' , encoding='utf-8' ) as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {merges_file}: BPE merge indices are not consecutive.'''
                        ' Please check that the tokenizer is not corrupted!' )
                    index = token_index
                writer.write(' '.join(bpe_tokens ) + '\n' )
                index += 1
        return (vocab_file, merges_file)
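# --- Illustrative sketch of the "@@ " continuation convention handled by
# `convert_tokens_to_string` above: BPE pieces ending in "@@ " glue onto the
# next piece when decoding.
tokens = ['hel@@', 'lo', 'wor@@', 'ld']
string = ' '.join(tokens )               # 'hel@@ lo wor@@ ld'
string = ''.join(string.split('@@ ' ) )  # 'hello world'
assert string == 'hello world'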
| 718
|
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False
def __lowerCamelCase ( self ):
super().setUp()
# fmt: off
        vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
        # fmt: on
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '\n' )
def __lowerCamelCase ( self , **SCREAMING_SNAKE_CASE_ ):
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE_ ):
        input_text = 'tester'
        output_text = 'tester'
        return input_text, output_text
@unittest.skip('MGP-STR always lower cases letters.' )
def __lowerCamelCase ( self ):
pass
def __lowerCamelCase ( self ):
        tokenizers = self.get_tokenizers(do_lower_case=False )
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                special_token = '[SPECIAL_TOKEN]'
                tokenizer.add_special_tokens({'cls_token': special_token} )
                encoded_special_token = tokenizer.encode([special_token] , add_special_tokens=False )
                self.assertEqual(len(encoded_special_token ) , 1 )
                decoded = tokenizer.decode(encoded_special_token , skip_special_tokens=True )
                self.assertTrue(special_token not in decoded )
def __lowerCamelCase ( self ):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                input_text , output_text = self.get_input_output_texts(tokenizer )
                tokens = tokenizer.tokenize(input_text )
                ids = tokenizer.convert_tokens_to_ids(tokens )
                ids_a = tokenizer.encode(input_text , add_special_tokens=False )
                self.assertListEqual(ids , ids_a )
                tokens_a = tokenizer.convert_ids_to_tokens(ids )
                self.assertNotEqual(len(tokens_a ) , 0 )
                text_a = tokenizer.decode(ids )
                self.assertIsInstance(text_a , str )
                self.assertEqual(text_a.replace(' ' , '' ) , output_text )
@unittest.skip('MGP-STR tokenizer only handles one sequence.' )
def __lowerCamelCase ( self ):
pass
@unittest.skip('inputs cannot be pretokenized in MgpstrTokenizer' )
def __lowerCamelCase ( self ):
pass
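    # --- Illustrative sketch added by the editor (not part of the original file). ---
    # What these tests exercise, in miniature: a character-level vocab round trip.
    # The tiny vocab below is an assumption for demonstration, not MGP-STR's real file.
    #
    #     vocab = {'[GO]': 0, '[s]': 1, 't': 2, 'e': 3, 's': 4, 'r': 5}
    #     inv_vocab = {i: c for c, i in vocab.items()}
    #     ids = [vocab[c] for c in 'tester']          # [2, 3, 4, 2, 3, 5]
    #     assert ''.join(inv_vocab[i] for i in ids) == 'tester'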
| 345
| 0
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/vocab.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/vocab.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/vocab.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/vocab.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/vocab.json""",
},
"""merges_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/merges.txt""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/merges.txt""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/merges.txt""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/merges.txt""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/tokenizer.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/tokenizer.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""gpt2""": 10_24,
"""gpt2-medium""": 10_24,
"""gpt2-large""": 10_24,
"""gpt2-xl""": 10_24,
"""distilgpt2""": 10_24,
}
class __snake_case ( PreTrainedTokenizerFast ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = VOCAB_FILES_NAMES
lowerCamelCase__ : Any = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ : Union[str, Any] = ["""input_ids""", """attention_mask"""]
lowerCamelCase__ : Tuple = GPTaTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , add_prefix_space=add_prefix_space , **kwargs , )
        self.add_bos_token = kwargs.pop('''add_bos_token''' , False )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('''add_prefix_space''' , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('''type''' ) )
            pre_tok_state['''add_prefix_space'''] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus( self , *args , **kwargs ):
        '''simple docstring'''
        is_split_into_words = kwargs.get('''is_split_into_words''' , False )
        assert self.add_prefix_space or not is_split_into_words, (
            f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args , **kwargs )
    def _encode_plus( self , *args , **kwargs ):
        '''simple docstring'''
        is_split_into_words = kwargs.get('''is_split_into_words''' , False )
        assert self.add_prefix_space or not is_split_into_words, (
            f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args , **kwargs )
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def _build_conversation_input_ids( self , conversation ):
        '''simple docstring'''
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
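# --- Illustrative usage sketch added by the editor (not part of the original file). ---
# The add_prefix_space guard above matters for pretokenized input; `gpt2` is the
# standard public checkpoint, but treat the exact resulting ids as illustrative.
#
#     from transformers import GPT2TokenizerFast
#     tok = GPT2TokenizerFast.from_pretrained('gpt2', add_prefix_space=True)
#     enc = tok(['Hello', 'world'], is_split_into_words=True)  # allowed only with add_prefix_space=True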
| 100
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
UpperCAmelCase = {
"microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class GitVisionConfig ( PretrainedConfig ):
    model_type = "git_vision_model"
    def __init__( self , hidden_size=7_68 , intermediate_size=30_72 , num_hidden_layers=12 , num_attention_heads=12 , num_channels=3 , image_size=2_24 , patch_size=16 , hidden_act="quick_gelu" , layer_norm_eps=1E-5 , attention_dropout=0.0 , initializer_range=0.02 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path : Union[str, os.PathLike] , **kwargs ) -> "PretrainedConfig":
        '''simple docstring'''
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type" ) == "git":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
        return cls.from_dict(config_dict , **kwargs )
class GitConfig ( PretrainedConfig ):
    model_type = "git"
    def __init__( self , vision_config=None , vocab_size=3_05_22 , hidden_size=7_68 , num_hidden_layers=6 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=10_24 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , tie_word_embeddings=False , bos_token_id=1_01 , eos_token_id=1_02 , num_image_with_embedding=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , pad_token_id=pad_token_id , **kwargs )
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values." )
        self.vision_config = GitVisionConfig(**vision_config )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
    def to_dict( self ):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__ )
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
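# --- Illustrative usage sketch added by the editor (values are assumptions). ---
# Composing the nested config and round-tripping it through to_dict():
#
#     vision_cfg = {'hidden_size': 512, 'num_hidden_layers': 6}
#     config = GitConfig(vision_config=vision_cfg, hidden_size=512)
#     assert config.to_dict()['vision_config']['hidden_size'] == 512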
| 666
| 0
|
def _a ( arr : list[int] , required_sum : int ):
    """simple docstring"""
    arr_len = len(arr )
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1 ):
        subset[i][0] = True
    # sum is not zero and set is empty then false
    for i in range(1 , required_sum + 1 ):
        subset[0][i] = False
    for i in range(1 , arr_len + 1 ):
        for j in range(1 , required_sum + 1 ):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
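    # Illustrative check added by the editor (assumed values, not in the original file):
    # a subset of [3, 34, 4, 12, 5, 2] summing to 9 exists (4 + 5).
    print(_a([3, 34, 4, 12, 5, 2] , 9 ) )  # True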
| 706
|
'''simple docstring'''
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class a ( FlaxModelTesterMixin , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase = FlaxAutoencoderKL
@property
    def dummy_input ( self ):
        '''simple docstring'''
        batch_size = 4
        num_channels = 3
        sizes = (3_2, 3_2)
        prng_key = jax.random.PRNGKey(0 )
        image = jax.random.uniform(prng_key , ((batch_size, num_channels) + sizes) )
        return {"sample": image, "prng_key": prng_key}
    def prepare_init_args_and_inputs_for_common ( self ):
        '''simple docstring'''
        init_dict = {
            '''block_out_channels''': [3_2, 6_4],
            '''in_channels''': 3,
            '''out_channels''': 3,
            '''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
            '''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
            '''latent_channels''': 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
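    # --- Hedged sketch added by the editor (assumed diffusers Flax API surface;
    # verify against your installed version before relying on it). ---
    #
    #     model = FlaxAutoencoderKL(block_out_channels=[32, 64], in_channels=3,
    #                               out_channels=3, latent_channels=4,
    #                               down_block_types=['DownEncoderBlock2D'] * 2,
    #                               up_block_types=['UpDecoderBlock2D'] * 2)
    #     params = model.init_weights(jax.random.PRNGKey(0))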
| 502
| 0
|
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def pytest_configure ( config ):
'''simple docstring'''
config.addinivalue_line(
"""markers""" , """is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested""" )
config.addinivalue_line(
"""markers""" , """is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested""" )
config.addinivalue_line("""markers""" , """is_pipeline_test: mark test to run only when pipelines are tested""" )
config.addinivalue_line("""markers""" , """is_staging_test: mark test to run only in the staging environment""" )
config.addinivalue_line("""markers""" , """accelerate_tests: mark test that require accelerate""" )
config.addinivalue_line("""markers""" , """tool_tests: mark the tool tests that are run on their specific schedule""" )
def pytest_addoption ( parser ):
    '''simple docstring'''
    from transformers.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser )
def pytest_terminal_summary ( terminalreporter ):
    '''simple docstring'''
    from transformers.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption("""--make-reports""" )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
def pytest_sessionfinish ( session , exitstatus ):
    '''simple docstring'''
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("""IGNORE_RESULT""")
OutputChecker = doctest.OutputChecker
class CustomOutputChecker ( OutputChecker ):
    def check_output ( self , want , got , optionflags ):
        """simple docstring"""
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self , want , got , optionflags )
doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
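# --- Illustrative note added by the editor (assumed usage, not in the original file). ---
# With the custom flag registered above, a doctest can opt out of output comparison:
#
#     >>> import random
#     >>> random.random()  # doctest: +IGNORE_RESULT
#     0.123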
| 80
|
from __future__ import annotations
from collections.abc import MutableSequence
class __UpperCamelCase :
    def __init__( self , degree : int , coefficients : MutableSequence[float] ) -> None:
        """simple docstring"""
        if len(coefficients ) != degree + 1:
            raise ValueError(
                """The number of coefficients should be equal to the degree + 1.""" )
        self.coefficients = list(coefficients )
        self.degree = degree
    def __add__( self , polynomial_a : Polynomial ) -> Polynomial:
        """simple docstring"""
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1 ):
                coefficients[i] += polynomial_a.coefficients[i]
            return Polynomial(self.degree , coefficients )
        else:
            coefficients = polynomial_a.coefficients[:]
            for i in range(self.degree + 1 ):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_a.degree , coefficients )
    def __sub__( self , polynomial_a : Polynomial ) -> Polynomial:
        """simple docstring"""
        return self + polynomial_a * Polynomial(0 , [-1] )
def __neg__( self : Union[str, Any] ) -> Polynomial:
"""simple docstring"""
return Polynomial(self.degree , [-c for c in self.coefficients] )
    def __mul__( self , polynomial_a : Polynomial ) -> Polynomial:
        """simple docstring"""
        coefficients = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1 ):
            for j in range(polynomial_a.degree + 1 ):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_a.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_a.degree , coefficients )
    def evaluate ( self , substitution : int | float ) -> int | float:
        """simple docstring"""
        result = 0
        for i in range(self.degree + 1 ):
            result += self.coefficients[i] * (substitution**i)
        return result
def __str__( self : Dict ) -> str:
"""simple docstring"""
__lowercase = """"""
for i in range(self.degree , -1 , -1 ):
if self.coefficients[i] == 0:
continue
elif self.coefficients[i] > 0:
if polynomial:
polynomial += " + "
else:
polynomial += " - "
if i == 0:
polynomial += str(abs(self.coefficients[i] ) )
elif i == 1:
polynomial += str(abs(self.coefficients[i] ) ) + "x"
else:
polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(_lowerCAmelCase )
return polynomial
def __repr__( self : Union[str, Any] ) -> str:
"""simple docstring"""
return self.__str__()
    def derivative ( self ) -> Polynomial:
        """simple docstring"""
        coefficients = [0] * self.degree
        for i in range(self.degree ):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1 , coefficients )
    def integral ( self , constant : int | float = 0 ) -> Polynomial:
        """simple docstring"""
        coefficients = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1 ):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1 , coefficients )
    def __eq__( self , polynomial_a : object ) -> bool:
        """simple docstring"""
        if not isinstance(polynomial_a , Polynomial ):
            return False
        if self.degree != polynomial_a.degree:
            return False
        for i in range(self.degree + 1 ):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False
        return True
    def __ne__( self , polynomial_a : object ) -> bool:
        """simple docstring"""
        return not self.__eq__(polynomial_a )
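# --- Illustrative usage added by the editor (assumed values, not in the original file). ---
# p = Polynomial(2 , [1, 2, 3])   # represents 3x^2 + 2x + 1
# p.evaluate(2)                   # 1 + 2*2 + 3*4 = 17
# str(p.derivative())             # '6x + 2'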
| 80
| 1
|
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def lowercase_ (self ):
'''simple docstring'''
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel" )
        processor = BlipProcessor(image_processor , tokenizer )
        processor.save_pretrained(self.tmpdirname )
def lowercase_ (self , **lowerCAmelCase__ ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase__ ).tokenizer
def lowercase_ (self , **lowerCAmelCase__ ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase__ ).image_processor
def lowercase_ (self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowercase_ (self ):
'''simple docstring'''
        image_inputs = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
def lowercase_ (self ):
'''simple docstring'''
        processor = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0 )
        processor = BlipProcessor.from_pretrained(
            self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=False , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCAmelCase__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCAmelCase__ )
def lowercase_ (self ):
'''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer , image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input , return_tensors="np" )
        input_processor = processor(images=image_input , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowercase_ (self ):
'''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = "lower newer"
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str , return_token_type_ids=False )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowercase_ (self ):
'''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase__ ):
processor()
def lowercase_ (self ):
'''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer , image_processor=image_processor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )
def lowercase_ (self ):
'''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
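    # --- Hedged usage sketch added by the editor (the checkpoint is a real public
    # BLIP checkpoint, but treat the snippet as illustrative, not the test's code). ---
    #
    #     from transformers import BlipProcessor
    #     processor = BlipProcessor.from_pretrained('Salesforce/blip-image-captioning-base')
    #     inputs = processor(text='a photo of a cat', images=image, return_tensors='pt')
    #     # -> dict with 'pixel_values', 'input_ids', 'attention_mask'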
| 239
|
"""simple docstring"""
values = {
0: """0""",
1: """1""",
2: """2""",
3: """3""",
4: """4""",
5: """5""",
6: """6""",
7: """7""",
8: """8""",
9: """9""",
1_0: """a""",
1_1: """b""",
1_2: """c""",
1_3: """d""",
1_4: """e""",
1_5: """f""",
}
def __lowerCAmelCase ( decimal : float ) -> str:
    assert type(decimal ) in (int, float) and decimal == int(decimal )
    decimal = int(decimal )
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal , remainder = divmod(decimal , 16 )
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal
if __name__ == "__main__":
import doctest
doctest.testmod()
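    # Illustrative checks added by the editor (assumed values, not in the original file):
    print(__lowerCAmelCase(2_55 ) )  # 0xff
    print(__lowerCAmelCase(-42 ) )  # -0x2a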
| 239
| 1
|
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert ( src_path : str, map_location : str = "cpu", save_path : Union[str, None] = None ):
    '''simple docstring'''
    state_dict = torch.load(src_path, map_location=map_location )
    for k, v in tqdm(state_dict.items() ):
        if not isinstance(v, torch.Tensor ):
            raise TypeError('''FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin''' )
        state_dict[k] = v.half()
    if save_path is None: # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path )
if __name__ == "__main__":
fire.Fire(convert)
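# Illustrative CLI usage added by the editor (script name and paths are assumptions):
#   python fp16_conversion.py path/to/pytorch_model.bin --save_path model.fp16.bin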
| 608
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__lowerCamelCase = logging.get_logger(__name__)
def squared_euclidean_distance ( a , b ):
    '''simple docstring'''
    b = b.T
    aa = np.sum(np.square(a ), axis=1 )
    ba = np.sum(np.square(b ), axis=0 )
    ab = np.matmul(a , b )
    d = aa[:, None] - 2 * ab + ba[None, :]
    return d
def color_quantize ( x , clusters ):
    '''simple docstring'''
    x = x.reshape(-1, 3 )
    d = squared_euclidean_distance(x , clusters )
    return np.argmin(d , axis=1 )
class _snake_case ( BaseImageProcessor ):
'''simple docstring'''
UpperCamelCase__ =["""pixel_values"""]
    def __init__( self , clusters : Optional[Union[List[List[int]], np.ndarray]] = None , do_resize : bool = True , size : Dict[str, int] = None , resample : PILImageResampling = PILImageResampling.BILINEAR , do_normalize : bool = True , do_color_quantize : bool = True , **kwargs , ):
        super().__init__(**kwargs )
        size = size if size is not None else {'''height''': 256, '''width''': 256}
        size = get_size_dict(size )
        self.clusters = np.array(clusters ) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize
    def resize ( self , image : np.ndarray , size : Dict[str, int] , resample : PILImageResampling = PILImageResampling.BILINEAR , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f'Size dictionary must contain both height and width keys. Got {size.keys()}' )
        return resize(
            image , size=(size['''height'''], size['''width''']) , resample=resample , data_format=data_format , **kwargs )
    def normalize ( self , image : np.ndarray , data_format : Optional[Union[str, ChannelDimension]] = None , ):
        image = rescale(image=image , scale=1 / 127.5 , data_format=data_format )
        image = image - 1
        return image
    def preprocess ( self , images : ImageInput , do_resize : bool = None , size : Dict[str, int] = None , resample : PILImageResampling = None , do_normalize : bool = None , do_color_quantize : Optional[bool] = None , clusters : Optional[Union[List[List[int]], np.ndarray]] = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST , **kwargs , ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size )
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and (size is None or resample is None):
            raise ValueError('''Size and resample must be specified if do_resize is True.''' )
        if do_color_quantize and clusters is None:
            raise ValueError('''Clusters must be specified if do_color_quantize is True.''' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image ) for image in images]
        if do_color_quantize:
            images = [to_channel_dimension_format(image , ChannelDimension.LAST ) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images )
            images = color_quantize(images , clusters ).reshape(images.shape[:-1] )
            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size , -1 )
            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images )
        else:
            images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'''input_ids''': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
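# --- Illustrative check added by the editor (assumed toy values, not in the original file). ---
# Each pixel snaps to its nearest cluster color:
#
#     pixels = np.array([[10, 10, 10], [250, 250, 250]], dtype=np.float32)
#     palette = np.array([[0, 0, 0], [255, 255, 255]], dtype=np.float32)
#     color_quantize(pixels, palette)  # -> array([0, 1])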
| 608
| 1
|
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch( prophetnet_checkpoint_path , pytorch_dump_folder_path ):
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path )
        prophet , loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path , output_loading_info=True )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path )
        prophet , loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path , output_loading_info=True )
    special_keys = ['key_proj', 'value_proj', 'query_proj']
    mapping = {
'self_attn': 'ngram_self_attn',
'cross_attn': 'encoder_attn',
'cross_attn_layer_norm': 'encoder_attn_layer_norm',
'feed_forward_layer_norm': 'final_layer_norm',
'feed_forward': '',
'intermediate': 'fc1',
'output': 'fc2',
'key_proj': 'k_proj',
'query_proj': 'q_proj',
'value_proj': 'v_proj',
'word_embeddings': 'embed_tokens',
'embeddings_layer_norm': 'emb_layer_norm',
'relative_pos_embeddings': 'relative_linear',
'ngram_embeddings': 'ngram_input_embed',
'position_embeddings': 'embed_positions',
}
for key in loading_info["missing_keys"]:
        attributes = key.split('''.''' )
        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model
        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
            if not hasattr(old_model , old_attribute ) and len(old_attribute ) > 0:
                old_attribute = attribute
            elif hasattr(old_model , attribute ):
                old_attribute = attribute
            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f'{attribute} is initialized.' )
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f'{attribute} is initialized' )
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model , 'in_proj_weight' ):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model , attribute )
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    param.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
                    param.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
                elif attribute == "key_proj":
                    param.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
                    param.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
                elif attribute == "value_proj":
                    param.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
                    param.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :] )
                is_key_init = True
                break
            if attribute.isdigit():
                model = model[int(attribute )]
                old_model = old_model[int(old_attribute )]
            else:
                model = getattr(model , attribute )
                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model , old_attribute ):
                        raise ValueError(f'{old_model} does not have {old_attribute}' )
                    old_model = getattr(old_model , old_attribute )
if not is_key_init:
raise ValueError(f'{key} was not correctly initialized!' )
print(f'Saving model to {pytorch_dump_folder_path}' )
prophet.save_pretrained(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
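    # Illustrative invocation added by the editor (script name and paths are assumptions):
    #   python convert_prophetnet_checkpoint.py \
    #       --prophetnet_checkpoint_path ./prophetnet_large \
    #       --pytorch_dump_folder_path ./prophetnet_converted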
| 230
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_opt': ['OPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'OPTConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_opt'] = [
'OPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OPTForCausalLM',
'OPTModel',
'OPTPreTrainedModel',
'OPTForSequenceClassification',
'OPTForQuestionAnswering',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_opt'] = ['TFOPTForCausalLM', 'TFOPTModel', 'TFOPTPreTrainedModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_opt'] = [
'FlaxOPTForCausalLM',
'FlaxOPTModel',
'FlaxOPTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
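# --- Illustrative note added by the editor (assumed usage, not in the original file). ---
# With the lazy-module pattern, heavy submodules load on first attribute access:
#   from transformers.models.opt import OPTConfig   # resolves through _LazyModule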
| 230
| 1
|
'''simple docstring'''
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple ( max_perimeter : int ) -> typing.Counter[int]:
    triplets = Counter()
    for base in range(1 , max_perimeter + 1 ):
        for perpendicular in range(base , max_perimeter + 1 ):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse ):
                perimeter = int(base + perpendicular + hypotenuse )
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets
def solution ( max_perimeter : int = 10_00 ) -> int:
    triplets = pythagorean_triple(max_perimeter )
    return triplets.most_common(1 )[0][0]
if __name__ == "__main__":
print(F"""Perimeter {solution()} has maximum solutions""")
| 369
|
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys( config , base_model=False ):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''module.blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''module.blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''module.blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''module.blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''module.blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("""module.cls_token""", """vit.embeddings.cls_token"""),
("""module.patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""module.patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""module.pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""module.norm.weight""", """layernorm.weight"""),
("""module.norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v( state_dict , config , base_model=False ):
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = """"""
        else:
            prefix = """vit."""
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''module.blocks.{i}.attn.qkv.weight''' )
        in_proj_bias = state_dict.pop(f'''module.blocks.{i}.attn.qkv.bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_( state_dict ):
    ignore_keys = ["""head.weight""", """head.bias"""]
    for k in ignore_keys:
        state_dict.pop(k , None )
def remove_projection_head( state_dict ):
# projection head is used in the self-supervised pre-training in MSN,
# for downstream task it's not needed.
    ignore_keys = [
"""module.fc.fc1.weight""",
"""module.fc.fc1.bias""",
"""module.fc.bn1.weight""",
"""module.fc.bn1.bias""",
"""module.fc.bn1.running_mean""",
"""module.fc.bn1.running_var""",
"""module.fc.bn1.num_batches_tracked""",
"""module.fc.fc2.weight""",
"""module.fc.fc2.bias""",
"""module.fc.bn2.weight""",
"""module.fc.bn2.bias""",
"""module.fc.bn2.running_mean""",
"""module.fc.bn2.running_var""",
"""module.fc.bn2.num_batches_tracked""",
"""module.fc.fc3.weight""",
"""module.fc.fc3.bias""",
]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def convert_vit_msn_checkpoint( checkpoint_url , pytorch_dump_folder_path ):
    config = ViTMSNConfig()
    config.num_labels = 1000
    repo_id = """datasets/huggingface/label-files"""
    filename = """imagenet-1k-id2label.json"""
    idalabel = json.load(open(hf_hub_download(repo_id , filename ) , """r""" ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
A__ : Tuple = 384
A__ : int = 1536
A__ : Union[str, Any] = 6
elif "l16" in checkpoint_url:
A__ : int = 1024
A__ : int = 4096
A__ : Any = 24
A__ : int = 16
A__ : Union[str, Any] = 0.1
elif "b4" in checkpoint_url:
A__ : Dict = 4
elif "l7" in checkpoint_url:
A__ : List[str] = 7
A__ : Optional[int] = 1024
A__ : Optional[int] = 4096
A__ : Any = 24
A__ : Union[str, Any] = 16
A__ : Tuple = 0.1
    model = ViTMSNModel(config )
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="""cpu""" )["""target_encoder"""]
    image_processor = ViTImageProcessor(size=config.image_size )
    remove_projection_head(state_dict )
    rename_keys = create_rename_keys(config , base_model=True )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model=True )
    model.load_state_dict(state_dict )
    model.eval()
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    image = Image.open(requests.get(url , stream=True ).raw )
    image_processor = ViTImageProcessor(
        size=config.image_size , image_mean=IMAGENET_DEFAULT_MEAN , image_std=IMAGENET_DEFAULT_STD )
    inputs = image_processor(images=image , return_tensors="""pt""" )
    # forward pass
    torch.manual_seed(2 )
    outputs = model(**inputs )
    last_hidden_state = outputs.last_hidden_state
    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]] )
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]] )
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]] )
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]] )
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]] )
    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3] , expected_slice , atol=1E-4 )
    print(f'''Saving model to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 363
| 0
|
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
a_ = "https://openaipublic.azureedge.net/jukebox/models/"
a_ = {
"jukebox-1b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"1b_lyrics/prior_level_2.pth.tar",
],
"jukebox-5b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"5b_lyrics/prior_level_2.pth.tar",
],
}
def replace_key( key ):
if key.endswith('''.model.1.bias''' ) and len(key.split('''.''' ) ) > 10:
__lowerCamelCase = key.replace('''.model.1.bias''' ,'''.conv1d_1.bias''' )
elif key.endswith('''.model.1.weight''' ) and len(key.split('''.''' ) ) > 10:
__lowerCamelCase = key.replace('''.model.1.weight''' ,'''.conv1d_1.weight''' )
elif key.endswith('''.model.3.bias''' ) and len(key.split('''.''' ) ) > 10:
__lowerCamelCase = key.replace('''.model.3.bias''' ,'''.conv1d_2.bias''' )
elif key.endswith('''.model.3.weight''' ) and len(key.split('''.''' ) ) > 10:
__lowerCamelCase = key.replace('''.model.3.weight''' ,'''.conv1d_2.weight''' )
if "conditioner_blocks.0." in key:
__lowerCamelCase = key.replace('''conditioner_blocks.0''' ,'''conditioner_blocks''' )
if "prime_prior" in key:
__lowerCamelCase = key.replace('''prime_prior''' ,'''encoder''' )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
__lowerCamelCase = key.replace('''.emb.''' ,'''.''' )
if key.endswith('''k''' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('''.k''' ,'''.codebook''' )
if "y_emb." in key:
return key.replace('''y_emb.''' ,'''metadata_embedding.''' )
if "x_emb.emb." in key:
__lowerCamelCase = key.replace('''0.x_emb.emb''' ,'''embed_tokens''' )
if "prime_state_ln" in key:
return key.replace('''prime_state_ln''' ,'''encoder.final_layer_norm''' )
if ".ln" in key:
return key.replace('''.ln''' ,'''.layer_norm''' )
if "_ln" in key:
return key.replace('''_ln''' ,'''_layer_norm''' )
if "prime_state_proj" in key:
return key.replace('''prime_state_proj''' ,'''encoder.proj_in''' )
if "prime_x_out" in key:
return key.replace('''prime_x_out''' ,'''encoder.lm_head''' )
if "prior.x_out" in key:
return key.replace('''x_out''' ,'''fc_proj_out''' )
if "x_emb" in key:
return key.replace('''x_emb''' ,'''embed_tokens''' )
return key
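# --- Illustrative check added by the editor (assumed example key, not in the original file). ---
# A deep enough key passes the len(key.split('.')) > 10 guard and gets its conv suffix rewritten:
#   k = 'encoders.0.level_blocks.0.model.2.model.0.model.1.model.1.bias'
#   replace_key(k)  # the trailing '.model.1.bias' becomes '.conv1d_1.bias'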
def fix_jukebox_keys( state_dict , model_state_dict , key_prefix , mapping ):
    new_dict = {}
import re
    re_encoder_block_conv_in = re.compile(R'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
    re_encoder_block_resnet = re.compile(
        R'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
    re_encoder_block_proj_out = re.compile(R'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
    re_decoder_block_conv_out = re.compile(R'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
    re_decoder_block_resnet = re.compile(
        R'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
    re_decoder_block_proj_in = re.compile(R'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
    re_prior_cond_conv_out = re.compile(R'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)''' )
    re_prior_cond_resnet = re.compile(
        R'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
    re_prior_cond_proj_in = re.compile(R'''conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)''' )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(_UpperCamelCase ):
__lowerCamelCase = re_encoder_block_conv_in.match(_UpperCamelCase )
__lowerCamelCase = regex_match.groups()
__lowerCamelCase = int(groups[2] ) * 2 + int(groups[3] )
__lowerCamelCase = F"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"""
__lowerCamelCase = re_encoder_block_conv_in.sub(_UpperCamelCase ,_UpperCamelCase )
elif re_encoder_block_resnet.fullmatch(_UpperCamelCase ):
__lowerCamelCase = re_encoder_block_resnet.match(_UpperCamelCase )
__lowerCamelCase = regex_match.groups()
__lowerCamelCase = int(groups[2] ) * 2 + int(groups[3] )
__lowerCamelCase = {'''1''': 1, '''3''': 2}[groups[-2]]
__lowerCamelCase = F"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."""
__lowerCamelCase = F"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
__lowerCamelCase = prefix + resnet_block
__lowerCamelCase = re_encoder_block_resnet.sub(_UpperCamelCase ,_UpperCamelCase )
elif re_encoder_block_proj_out.fullmatch(_UpperCamelCase ):
__lowerCamelCase = re_encoder_block_proj_out.match(_UpperCamelCase )
__lowerCamelCase = regex_match.groups()
__lowerCamelCase = F"""encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"""
__lowerCamelCase = re_encoder_block_proj_out.sub(_UpperCamelCase ,_UpperCamelCase )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(_UpperCamelCase ):
__lowerCamelCase = re_decoder_block_conv_out.match(_UpperCamelCase )
__lowerCamelCase = regex_match.groups()
__lowerCamelCase = int(groups[2] ) * 2 + int(groups[3] ) - 2
__lowerCamelCase = F"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"""
__lowerCamelCase = re_decoder_block_conv_out.sub(_UpperCamelCase ,_UpperCamelCase )
elif re_decoder_block_resnet.fullmatch(_UpperCamelCase ):
__lowerCamelCase = re_decoder_block_resnet.match(_UpperCamelCase )
__lowerCamelCase = regex_match.groups()
__lowerCamelCase = int(groups[2] ) * 2 + int(groups[3] ) - 2
__lowerCamelCase = {'''1''': 1, '''3''': 2}[groups[-2]]
__lowerCamelCase = F"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."""
__lowerCamelCase = F"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
__lowerCamelCase = prefix + resnet_block
__lowerCamelCase = re_decoder_block_resnet.sub(_UpperCamelCase ,_UpperCamelCase )
elif re_decoder_block_proj_in.fullmatch(_UpperCamelCase ):
__lowerCamelCase = re_decoder_block_proj_in.match(_UpperCamelCase )
__lowerCamelCase = regex_match.groups()
__lowerCamelCase = F"""decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"""
__lowerCamelCase = re_decoder_block_proj_in.sub(_UpperCamelCase ,_UpperCamelCase )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(_UpperCamelCase ):
__lowerCamelCase = re_prior_cond_conv_out.match(_UpperCamelCase )
__lowerCamelCase = regex_match.groups()
__lowerCamelCase = int(groups[1] ) * 2 + int(groups[2] ) - 2
__lowerCamelCase = F"""conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"""
__lowerCamelCase = re_prior_cond_conv_out.sub(_UpperCamelCase ,_UpperCamelCase )
elif re_prior_cond_resnet.fullmatch(_UpperCamelCase ):
__lowerCamelCase = re_prior_cond_resnet.match(_UpperCamelCase )
__lowerCamelCase = regex_match.groups()
__lowerCamelCase = int(groups[1] ) * 2 + int(groups[2] ) - 2
__lowerCamelCase = {'''1''': 1, '''3''': 2}[groups[-2]]
__lowerCamelCase = F"""conditioner_blocks.upsampler.upsample_block.{block_index}."""
__lowerCamelCase = F"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
__lowerCamelCase = prefix + resnet_block
__lowerCamelCase = re_prior_cond_resnet.sub(_UpperCamelCase ,_UpperCamelCase )
elif re_prior_cond_proj_in.fullmatch(_UpperCamelCase ):
__lowerCamelCase = re_prior_cond_proj_in.match(_UpperCamelCase )
__lowerCamelCase = regex_match.groups()
__lowerCamelCase = F"""conditioner_blocks.upsampler.proj_in.{groups[-1]}"""
__lowerCamelCase = re_prior_cond_proj_in.sub(_UpperCamelCase ,_UpperCamelCase )
# keep original key
        else:
            key = original_key
        key = replace_key(key )
        if f"""{key_prefix}.{key}""" not in model_state_dict or key is None:
            print(f"""failed converting {original_key} to {key}, does not match""" )
        # handle missmatched shape
        elif value.shape != model_state_dict[f"""{key_prefix}.{key}"""].shape:
            val = model_state_dict[f"""{key_prefix}.{key}"""]
            print(f"""{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match""" )
            key = original_key
        mapping[key] = original_key
        new_dict[key] = value
return new_dict
@torch.no_grad()
def convert_openai_checkpoint( model_name=None , pytorch_dump_folder_path=None ):
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(F"""{pytorch_dump_folder_path}/{file.split("/" )[-1]}""" ):
            r = requests.get(f"""{PREFIX}{file}""" ,allow_redirects=True )
            os.makedirs(f"""{pytorch_dump_folder_path}/""" ,exist_ok=True )
open(F"""{pytorch_dump_folder_path}/{file.split("/" )[-1]}""" ,'''wb''' ).write(r.content )
    model_to_convert = MODEL_MAPPING[model_name.split('''/''' )[-1]]
    config = JukeboxConfig.from_pretrained(model_name )
    model = JukeboxModel(config )
    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert ):
__lowerCamelCase = torch.load(F"""{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}""" )['''model''']
__lowerCamelCase = {}
for k in old_dic.keys():
if k.endswith('''.b''' ):
__lowerCamelCase = old_dic[k]
elif k.endswith('''.w''' ):
__lowerCamelCase = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
__lowerCamelCase = old_dic[k]
else:
__lowerCamelCase = old_dic[k]
__lowerCamelCase = '''vqvae''' if i == 0 else F"""priors.{3 - i}"""
__lowerCamelCase = fix_jukebox_keys(_UpperCamelCase ,model.state_dict() ,_UpperCamelCase ,_UpperCamelCase )
weight_dict.append(_UpperCamelCase )
__lowerCamelCase = weight_dict.pop(0 )
model.vqvae.load_state_dict(_UpperCamelCase )
for i in range(len(_UpperCamelCase ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(_UpperCamelCase ).mkdir(exist_ok=_UpperCamelCase )
with open(F"""{pytorch_dump_folder_path}/mapping.json""" ,'''w''' ) as txtfile:
json.dump(_UpperCamelCase ,_UpperCamelCase )
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_UpperCamelCase )
return weight_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="jukebox-5b-lyrics",
        type=str,
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="jukebox-5b-lyrics-converted",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    args = parser.parse_args()
    convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
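    # Example invocation (the script filename is an assumption for illustration):
    #   python convert_jukebox.py --model_name jukebox-1b-lyrics \
    #       --pytorch_dump_folder_path jukebox-1b-lyrics-converted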
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
class XLMProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "[PAD]"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "[PAD]")
        self.assertEqual(vocab_keys[1], "[CLS]")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1_012)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_012)

    def test_full_tokenizer(self):
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''[UNK]''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''[UNK]''',
'''.''',
] , )
    @cached_property
    def big_tokenizer(self):
        return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [35389, 6672, 49, 2]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
__lowerCamelCase = {'''input_ids''': [[11073, 82783, 18, 26, 82783, 549, 51540, 248, 17209, 1301, 217, 20, 215186, 1325, 147, 17209, 1301, 217, 20, 56370, 53, 122020, 20, 16477, 27, 87355, 4548, 20, 4728, 78392, 17, 159969, 18, 26, 24491, 629, 15, 538, 22704, 5439, 15, 2788, 24491, 9885, 15, 43534, 605, 15, 814, 18403, 33200, 29, 15, 43534, 24458, 12410, 111, 24966, 83669, 9637, 144068, 26, 850, 22346, 27, 147, 24966, 83669, 83490, 26, 39113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 122020, 115785, 34, 816, 1339, 46887, 18, 147, 53905, 1951, 42238, 41170, 17732, 834, 436, 15, 27523, 98733, 217, 147, 5542, 4981, 930, 17347, 16, 2], [20091, 629, 94, 82786, 58, 490, 20, 1528, 84, 53905, 344, 80592, 110128, 18822, 5267, 1306, 62, 152537, 308, 7997, 401, 124427, 549, 35442, 225, 109, 15055, 25748, 147, 7119, 43712, 34, 767, 135366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63784, 119466, 17, 147808, 88214, 18, 656, 81, 32, 3296, 10280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__lowerCamelCase,
            model_name="microsoft/xprophetnet-large-wiki100-cased",
            revision="1acad1643ddd54a44df6a1b797ada8373685d90e",
        )
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
logger = logging.get_logger(__name__)


class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")


@require_sentencepiece
@require_tokenizers
class DebertaVaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaVaTokenizer
    rust_tokenizer_class = DebertaVaTokenizerFast
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, unk_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "[PAD]")
        self.assertEqual(len(vocab_keys), 30_001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30_000)
    def test_do_lower_case(self):
        # fmt: off
        sequence = " \tHeLLo!how \n Are yoU? "
        tokens_target = ["▁hello", "!", "how", "▁are", "▁you", "?"]
        # fmt: on

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, do_lower_case=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_convert_tokens_to_string(self):
        pass

    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_decode(self):
        pass
    def test_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_do_lower_case_split_by_punct_false(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_false_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_false_split_by_punct_false(self):
        # fmt: off
        sequence = " \tHeLLo!how \n Are yoU? "
        tokens_target = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
        # fmt: on

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_rust_and_python_full_tokenizers(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        sequence = "This is a test"
        ids_target = [13, 1, 4398, 25, 21, 1289]
        tokens_target = ["▁", "T", "his", "▁is", "▁a", "▁test"]
        back_tokens_target = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, keep_accents=True)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)

        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)

        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)

        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)

        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        ids_target = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
        tokens_target = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
        back_tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)

        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)

        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)

        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)
    def test_sequence_builders(self):
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_a = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)

        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], encoded_sentence)
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id],
            encoded_pair,
        )

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
_lowerCamelCase : Dict ={'input_ids': [[1, 3_9867, 36, 1_9390, 486, 27, 3_5052, 8_1436, 18, 6_0685, 1225, 7, 3_5052, 8_1436, 18, 9367, 1_6899, 18, 1_5937, 53, 594, 773, 18, 1_6287, 3_0465, 36, 1_5937, 6, 4_1139, 38, 3_6979, 6_0763, 191, 6, 3_4132, 99, 6, 5_0538, 390, 4_3230, 6, 3_4132, 2779, 2_0850, 14, 699, 1072, 1194, 36, 382, 1_0901, 53, 7, 699, 1072, 2084, 36, 2_0422, 630, 53, 19, 105, 3049, 1896, 1053, 1_6899, 1506, 11, 3_7978, 4243, 7, 1237, 3_1869, 200, 1_6566, 654, 6, 3_5052, 8_1436, 7, 5_5630, 1_3593, 4, 2], [1, 26, 1_5011, 13, 667, 8, 1053, 18, 2_3611, 1237, 7_2356, 1_2820, 34, 10_4134, 1209, 35, 1_3313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 1_5785, 1_4951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_lowerCamelCase,
            model_name="microsoft/deberta-v2-xlarge",
            revision="ad6e42c1532ddf3a15c39246b63f5559d558b670",
        )
def multiplicative_persistence(num: int) -> int:
    """
    Return the number of times `num` must have its digits multiplied
    together before reaching a single digit.

    >>> multiplicative_persistence(39)
    3
    """
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]

        num_string = str(total)

        steps += 1
    return steps
def additive_persistence(num: int) -> int:
    """
    Return the number of times `num` must have its digits summed
    together before reaching a single digit.

    >>> additive_persistence(199)
    3
    """
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]

        num_string = str(total)

        steps += 1
    return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor(ProcessorMixin):
    r"""
    Constructs a Chinese-CLIP processor which wraps a Chinese-CLIP image processor and a Chinese-CLIP tokenizer into
    a single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
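

# Minimal usage sketch (the checkpoint name is an assumption for illustration):
#   processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
#   inputs = processor(text=["一张猫的照片"], images=image, return_tensors="pt")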
from ... import PretrainedConfig
NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}


class NezhaConfig(PretrainedConfig):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(
        self,
        vocab_size=21128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
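

# Usage sketch: the defaults above mirror the nezha-cn-base architecture, so
#   config = NezhaConfig()
#   assert config.hidden_size == 768 and config.max_relative_position == 64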
from __future__ import annotations
from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray
def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    """
    Resolve a force given in polar form (magnitude, angle) into its
    rectangular components (force_x, force_y).
    """
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]
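

# Example: a 10 N force at 90 degrees has no horizontal component, so
# polar_force(10, 90) ~= [0.0, 10.0] (up to floating-point rounding).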
def in_static_equilibrium(
    forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1
) -> bool:
    """
    Check if a system of forces is in static equilibrium, i.e. the net
    moment about the origin is (approximately) zero.
    """
    # summation of moments is zero
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps
if __name__ == "__main__":
    # Test to check if it works
    forces = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )

    location: NDArray[float64] = array([[0, 0], [0, 0], [0, 0]])

    assert in_static_equilibrium(forces, location)

    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )

    location = array([[0, 0], [0, 0], [0, 0]])

    assert in_static_equilibrium(forces, location)

    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])

    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])

    assert in_static_equilibrium(forces, location)

    import doctest

    doctest.testmod()
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args):
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}
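

# Example: parse_unknown_args(["--num_proc", "8", "--name", "default"])
# returns {"num_proc": "8", "name": "default"}.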
def main():
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
    )
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)

    # Parse args
    args, unknown = parser.parse_known_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown)

    # Run
    service = args.func(args, **kwargs)
    service.run()
if __name__ == "__main__":
main()
def catalan(number: int) -> int:
    """
    Calculate the nth Catalan number (1-indexed).

    >>> catalan(5)
    14
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)

    current_number = 1
    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
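

# The loop above implements the recurrence C(n) = C(n - 1) * (4n - 2) / (n + 1),
# so with 1-based indexing: catalan(1) == 1, catalan(2) == 1, catalan(3) == 2,
# catalan(4) == 5, catalan(5) == 14.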
if __name__ == "__main__":
import doctest
doctest.testmod()
def sylvester(number: int) -> int:
    """
    Calculate the nth number in Sylvester's sequence, where each term is
    the product of all previous terms plus one: 2, 3, 7, 43, 1807, ...
    """
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"

    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1
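

# The recursion above uses the equivalent closed form
# a(n) = a(n - 1)**2 - a(n - 1) + 1, e.g. sylvester(4) == 6 * 7 + 1 == 43.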
if __name__ == "__main__":
print(F'''The 8th number in Sylvester\'s sequence: {sylvester(8)}''')
def palindromic_string(input_string: str) -> str:
    """
    Manacher's algorithm: find the longest palindromic substring in linear time.
    """
    max_length = 0

    # if input_string is "aba" than new_input_string become "a|b|a"
    new_input_string = ""
    output_string = ""

    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]

    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0

    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        length[j] = 2 * k - 1

        # does this string is ending after the previously explored end (that is r) ?
        # if yes the update the new r to the last index of this
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string
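

# Example: for "aba" the transformed string is "a|b|a" and the whole input is a
# palindrome, so palindromic_string("aba") == "aba"; similarly
# palindromic_string("abbbaba") == "abbba".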
if __name__ == "__main__":
import doctest
doctest.testmod()
import math
def proth(number: int) -> int:
    """
    :param number: nth number to calculate in the sequence
    :return: the nth number in the Proth number sequence
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)

    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # Proth numbers come in "blocks" after [3, 5]; each block doubles in size
        block_index = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3

        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2

    return proth_list[number - 1]
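

# Example: the sequence starts 3, 5, 9, 13, 17, 25, 33, ... so proth(6) == 25.
# (Proth numbers have the form k * 2**n + 1 with k odd and k < 2**n.)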
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    for number in range(11):
        value = 0
        try:
            value = proth(number)
        except ValueError:
            print(f"ValueError: there is no {number}th Proth number")
            continue

        print(f"The {number}th Proth number: {value}")
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class VivitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        num_frames=10,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}

        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size
    def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class VivitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = VivitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = VivitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL videos
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], Image.Image)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], np.ndarray)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], torch.Tensor)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    vae_state_dict = checkpoint

    new_checkpoint = {}

    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]

    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]

    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]

        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]

        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint
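

# Illustrative key rename performed above: with meta_path {"old": "down.0.block",
# "new": "down_blocks.0.resnets"}, a checkpoint key such as
# "encoder.down.0.block.1.conv1.weight" ends up under
# "encoder.down_blocks.0.resnets.1.conv1.weight" in the diffusers layout.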
def vae_pt_to_vae_diffuser(
    checkpoint_path: str,
    output_path: str,
):
    # Only support V1
    r = requests.get(
        " https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)

    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")

    args = parser.parse_args()

    vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


class MobileNetV2ImageProcessor(BaseImageProcessor):
    r"""
    Constructs an image processor that resizes, center-crops, rescales and normalizes images into `pixel_values`.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_v1 import MobileNetV1Config
logger = logging.get_logger(__name__)


# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"


MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]


def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """
    A map of modules from TF to PyTorch.
    """

    tf_to_pt_map = {}

    if isinstance(model, MobileNetV1ForImageClassification):
        backbone = model.mobilenet_v1
    else:
        backbone = model

    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    if isinstance(model, MobileNetV1ForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map
def load_tf_weights_in_mobilenet_va(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoints into a PyTorch model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")

        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)

        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model
def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d) -> torch.Tensor:
    """
    Apply TensorFlow-style "SAME" padding to a convolution layer.
    """
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
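# Worked example of the computation above (hypothetical shapes): for a 7x7
# feature map with stride 2 and kernel 3, in_height % stride_height == 1, so
# pad_along_height = max(3 - 1, 0) = 2, giving pad_top = 1 and pad_bottom = 1,
# which reproduces TensorFlow's "SAME" padding for that layer.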
class MobileNetVaConvLayer(nn.Module):
    def __init__(
        self,
        config: MobileNetVaConfig,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: Optional[int] = 1,
        groups: Optional[int] = 1,
        bias: bool = False,
        use_normalization: Optional[bool] = True,
        use_activation: Optional[bool or str] = True,
    ) -> None:
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode="zeros",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACTaFN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACTaFN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class MobileNetVaPreTrainedModel(PreTrainedModel):
    config_class = MobileNetVaConfig
    load_tf_weights = load_tf_weights_in_mobilenet_va
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module: Union[nn.Linear, nn.Conv2d]) -> None:
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
MOBILENET_V1_START_DOCSTRING = r'\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n\n    Parameters:\n        config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
MOBILENET_V1_INPUTS_DOCSTRING = r'\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`MobileNetV1ImageProcessor.__call__`] for details.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetVaModel(MobileNetVaPreTrainedModel):
    def __init__(self, config: MobileNetVaConfig, add_pooling_layer: bool = True):
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

        self.conv_stem = MobileNetVaConvLayer(
            config, in_channels=config.num_channels, out_channels=out_channels, kernel_size=3, stride=2
        )

        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels

            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            # Depthwise 3x3 convolution followed by a pointwise 1x1 convolution.
            self.layer.append(
                MobileNetVaConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=in_channels,
                    kernel_size=3,
                    stride=strides[i],
                    groups=in_channels,
                )
            )
            self.layer.append(
                MobileNetVaConvLayer(config, in_channels=in_channels, out_channels=out_channels, kernel_size=1)
            )

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.conv_stem(pixel_values)

        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=all_hidden_states,
        )
@add_start_docstrings(
    """
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetVaForImageClassification(MobileNetVaPreTrainedModel):
    def __init__(self, config: MobileNetVaConfig) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilenet_va = MobileNetVaModel(config)

        last_hidden_size = self.mobilenet_va.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_va(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss, logits=logits, hidden_states=outputs.hidden_states
        )
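# A hedged usage sketch (the checkpoint is the hub id already referenced in the
# docstring constants above; `image` is assumed to be a PIL image):
#
#   from transformers import AutoImageProcessor
#   processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
#   model = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
#   inputs = processor(images=image, return_tensors="pt")
#   predicted_class = model(**inputs).logits.argmax(-1).item()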
| 195
|
'''simple docstring'''
from pathlib import Path
import fire
def minify(src_path: str, dest_path: str, n: int):
    """Write the first n lines of each file in src_path to dest_path."""
    src_dir = Path(src_path)
    dest_dir = Path(dest_path)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_file = dest_dir.joinpath(path.name)
        print(dest_file)
        dest_file.open("w").write("\n".join(new))
if __name__ == "__main__":
fire.Fire(minify)
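# Example invocation (hypothetical paths and script name): keep the first 100
# lines of every file under wmt_en_ro/ and write the result to wmt_en_ro_100/:
#   python minify_dataset.py wmt_en_ro wmt_en_ro_100 100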
| 195
| 1
|
"""simple docstring"""
# Function to print upper half of diamond (pyramid)
def floyd(n):
    """
    Print the upper half of the diamond (pyramid) with n rows.
    """
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()
def reverse_floyd(n):
    """
    Print the lower half of the diamond with n rows.
    """
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")
def pretty_print(n):
    """
    Print the full diamond, i.e. the upper and lower halves.
    """
    if n <= 0:
        print("       ...       ....        nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half
if __name__ == "__main__":
print(r'| /\ | |- | |- |--| |\ /| |-')
print(r'|/ \| |- |_ |_ |__| | \/ | |_')
    K = 1
    while K:
        user_number = int(input("enter the number and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))

    print("Good Bye...")
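# For example, pretty_print(3) prints rows of 1, 2 and 3 stars (right-shifted)
# for the upper half of the diamond, then the mirrored rows for the lower half.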
| 389
|
"""simple docstring"""
MOD_ADLER = 65521


def adler32(plain_text: str) -> int:
    """
    Calculate the Adler-32 checksum of the given string.
    """
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
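# Quick check against the classic reference value from the Adler-32 article:
# adler32("Wikipedia") == 300286872 (0x11E60398)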
| 389
| 1
|
from ....utils import logging
logger = logging.get_logger(__name__)
class MMBTConfig:
    """
    Wraps an existing text-model config and records the hidden size of the modal
    encoder (class name reconstructed from context; the original identifier was
    obfuscated).
    """

    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
| 298
|
import os
import zipfile
import pytest
from datasets.utils.extract import (
BzipaExtractor,
Extractor,
GzipExtractor,
LzaExtractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lza, require_pyazr, require_zstandard
@pytest.mark.parametrize(
"compression_format, is_archive" ,[
("7z", True),
("bz2", False),
("gzip", False),
("lz4", False),
("tar", True),
("xz", False),
("zip", True),
("zstd", False),
] ,)
def test_base_extractors(
    compression_format,
    is_archive,
    bza_file,
    gz_file,
    lza_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    input_paths_and_base_extractors = {
        "7z": (seven_zip_file, SevenZipExtractor),
        "bz2": (bza_file, BzipaExtractor),
        "gzip": (gz_file, GzipExtractor),
        "lz4": (lza_file, LzaExtractor),
        "tar": (tar_file, TarExtractor),
        "xz": (xz_file, XzExtractor),
        "zip": (zip_file, ZipExtractor),
        "zstd": (zstd_file, ZstdExtractor),
    }
    input_path, base_extractor = input_paths_and_base_extractors[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_pyazr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lza.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    assert base_extractor.is_extractable(input_path)
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    base_extractor.extract(input_path, output_path)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
"compression_format, is_archive" ,[
("7z", True),
("bz2", False),
("gzip", False),
("lz4", False),
("tar", True),
("xz", False),
("zip", True),
("zstd", False),
] ,)
def test_extractor(
    compression_format,
    is_archive,
    bza_file,
    gz_file,
    lza_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    input_paths = {
        "7z": seven_zip_file,
        "bz2": bza_file,
        "gzip": gz_file,
        "lz4": lza_file,
        "tar": tar_file,
        "xz": xz_file,
        "zip": zip_file,
        "zstd": zstd_file,
    }
    input_path = input_paths[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_pyazr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lza.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    extractor_format = Extractor.infer_extractor_format(input_path)
    assert extractor_format is not None
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    Extractor.extract(input_path, output_path, extractor_format)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.fixture
def tar_file_with_dot_dot(tmp_path, text_file):
    import tarfile

    directory = tmp_path / "data_dot_dot"
    directory.mkdir()
    path = directory / "tar_file_with_dot_dot.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.join("..", text_file.name))
    return path
@pytest.fixture
def tar_file_with_sym_link(tmp_path):
    import tarfile

    directory = tmp_path / "data_sym_link"
    directory.mkdir()
    path = directory / "tar_file_with_sym_link.tar"
    os.symlink("..", directory / "subdir", target_is_directory=True)
    with tarfile.TarFile(path, "w") as f:
        f.add(str(directory / "subdir"), arcname="subdir")  # str required by os.readlink on Windows and Python < 3.8
    return path
@pytest.mark.parametrize(
"insecure_tar_file, error_log" ,[("tar_file_with_dot_dot", "illegal path"), ("tar_file_with_sym_link", "Symlink")] ,)
def test_tar_extract_insecure_files(
    insecure_tar_file, error_log, tar_file_with_dot_dot, tar_file_with_sym_link, tmp_path, caplog
):
    insecure_tar_files = {
        "tar_file_with_dot_dot": tar_file_with_dot_dot,
        "tar_file_with_sym_link": tar_file_with_sym_link,
    }
    path = insecure_tar_files[insecure_tar_file]
    output_path = tmp_path / "extracted"
    TarExtractor.extract(path, output_path)
    assert caplog.text
    for record in caplog.records:
        assert record.levelname == "ERROR"
        assert error_log in record.msg
def test_is_zipfile_false_positive(tmpdir):
    # We should have less false positives than zipfile.is_zipfile
    # We do that by checking only the magic number
    not_a_zip_file = tmpdir / "not_a_zip_file"
    # From: https://github.com/python/cpython/pull/5053
    data = (
        b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
        b"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
        b"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
        b"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
    )
    with not_a_zip_file.open("wb") as f:
        f.write(data)
    assert zipfile.is_zipfile(str(not_a_zip_file))  # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file)  # but we're right
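# A minimal sketch (assumed helper, not part of the test suite above) of the
# magic-number check that makes ZipExtractor stricter than zipfile.is_zipfile:
def _starts_with_zip_magic(path):
    with open(path, "rb") as f:
        # Local file header, empty archive, and spanned archive signatures.
        return f.read(4) in (b"PK\x03\x04", b"PK\x05\x06", b"PK\x07\x08")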
| 298
| 1
|
from __future__ import annotations
import requests
valid_terms = set(
'approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports'.split()
)
def get_subreddit_data(
    subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None
) -> dict:
    """
    Fetch posts from a subreddit, optionally restricted to the fields in wanted_data.
    """
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg)
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
        headers={"User-agent": "A random string"},
    )
    if response.status_code == 429:
        raise requests.HTTPError

    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}

    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict
if __name__ == "__main__":
# If you get Error 429, that means you are rate limited.Try after some time
print(get_subreddit_data('learnpython', wanted_data=['title', 'url', 'selftext']))
| 31
|
'''simple docstring'''
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow:
    """
    Wraps the CLIP processor so the image-processing step stays differentiable:
    the tokenizer is used for text, while images are kept as torch tensors.
    """

    def __init__(self, device: str = "cpu", clip_model: str = "openai/clip-vit-large-patch14") -> None:
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model)
        self.image_mean = [0.48145466, 0.4578275, 0.40821073]
        self.image_std = [0.26862954, 0.26130258, 0.27577711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std)
        self.resize = torchvision.transforms.Resize(224)
        self.center_crop = torchvision.transforms.CenterCrop(224)

    def preprocess_img(self, images):
        images = self.resize(images)
        images = self.center_crop(images)
        images = self.normalize(images)
        return images

    def __call__(self, text=None, images=None, **kwargs):
        encoding = self.tokenizer(text=text, **kwargs)
        encoding["pixel_values"] = self.preprocess_img(images)
        encoding = {key: value.to(self.device) for (key, value) in encoding.items()}
        return encoding
class VQGAN_CLIP(nn.Module):
    def __init__(
        self,
        iterations=10,
        lr=0.01,
        vqgan=None,
        vqgan_config=None,
        vqgan_checkpoint=None,
        clip=None,
        clip_preprocessor=None,
        device=None,
        log=False,
        save_vector=True,
        return_val="image",
        quantize=True,
        save_intermediate=False,
        show_intermediate=False,
        make_grid=False,
    ):
        """
        Instantiate a VQGAN+CLIP image editor.
        """
        super().__init__()
        self.latent = None
        self.device = device if device else get_device()
        if vqgan:
            self.vqgan = vqgan
        else:
            self.vqgan = load_vqgan(self.device, conf_path=vqgan_config, ckpt_path=vqgan_checkpoint)
        self.vqgan.eval()
        if clip:
            self.clip = clip
        else:
            self.clip = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        self.clip.to(self.device)
        self.clip_preprocessor = ProcessorGradientFlow(device=self.device)

        self.iterations = iterations
        self.lr = lr
        self.log = log
        self.make_grid = make_grid
        self.return_val = return_val
        self.quantize = quantize
        self.latent_dim = self.vqgan.decoder.z_shape
    def make_animation(self, input_path=None, output_path=None, total_duration=5, extend_frames=True):
        images = []
        if output_path is None:
            output_path = "./animation.gif"
        if input_path is None:
            input_path = self.save_path
        paths = sorted(glob(input_path + "/*"))
        if not len(paths):
            raise ValueError(
                "No images found in save path, aborting (did you pass save_intermediate=True to the generate"
                " function?)"
            )
        if len(paths) == 1:
            print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)")
        frame_duration = total_duration / len(paths)
        durations = [frame_duration] * len(paths)
        if extend_frames:
            durations[0] = 1.5
            durations[-1] = 3
        for file_name in paths:
            if file_name.endswith(".png"):
                images.append(imageio.imread(file_name))
        imageio.mimsave(output_path, images, duration=durations)
        print(f"gif saved to {output_path}")
    def _get_latent(self, path=None, img=None):
        if not (path or img):
            raise ValueError("Input either path or tensor")
        if img is not None:
            raise NotImplementedError
        x = preprocess(Image.open(path), target_image_size=256).to(self.device)
        x_processed = preprocess_vqgan(x)
        z, *_ = self.vqgan.encode(x_processed)
        return z
    def _add_vector(self, transform_vector):
        """Add a transform vector to the latent and decode it."""
        base_latent = self.latent.detach().requires_grad_()
        trans_latent = base_latent + transform_vector
        if self.quantize:
            z_q, *_ = self.vqgan.quantize(trans_latent)
        else:
            z_q = trans_latent
        return self.vqgan.decode(z_q)
    def _get_clip_similarity(self, prompts, image, weights=None):
        clip_inputs = self.clip_preprocessor(text=prompts, images=image, return_tensors="pt", padding=True)
        clip_outputs = self.clip(**clip_inputs)
        similarity_logits = clip_outputs.logits_per_image
        if weights is not None:
            similarity_logits = similarity_logits * weights
        return similarity_logits.sum()
    def _get_CLIP_loss(self, pos_prompts, neg_prompts, image):
        pos_logits = self._get_clip_similarity(pos_prompts["prompts"], image, weights=(1 / pos_prompts["weights"]))
        if neg_prompts:
            neg_logits = self._get_clip_similarity(neg_prompts["prompts"], image, weights=neg_prompts["weights"])
        else:
            neg_logits = torch.tensor([1], device=self.device)
        loss = -torch.log(pos_logits) + torch.log(neg_logits)
        return loss
    def _optimize_CLIP(self, original_img, pos_prompts, neg_prompts):
        vector = torch.randn_like(self.latent, requires_grad=True, device=self.device)
        optim = torch.optim.Adam([vector], lr=self.lr)

        for i in range(self.iterations):
            optim.zero_grad()
            transformed_img = self._add_vector(vector)
            processed_img = loop_post_process(transformed_img)
            clip_loss = self._get_CLIP_loss(pos_prompts, neg_prompts, processed_img)
            print("CLIP loss", clip_loss)
            if self.log:
                wandb.log({"CLIP Loss": clip_loss})
            clip_loss.backward(retain_graph=True)
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0])
            else:
                yield vector
    def _init_logging(self, positive_prompts, negative_prompts, image_path):
        wandb.init(reinit=True, project="face-editor")
        wandb.config.update({"Positive Prompts": positive_prompts})
        wandb.config.update({"Negative Prompts": negative_prompts})
        wandb.config.update({"lr": self.lr, "iterations": self.iterations})
        if image_path:
            image = Image.open(image_path)
            image = image.resize((256, 256))
            wandb.log("Original Image", wandb.Image(image))
    def process_prompts(self, prompts):
        if not prompts:
            return []
        processed_prompts = []
        weights = []
        if isinstance(prompts, str):
            prompts = [prompt.strip() for prompt in prompts.split("|")]
        for prompt in prompts:
            if isinstance(prompt, (tuple, list)):
                processed_prompt = prompt[0]
                weight = float(prompt[1])
            elif ":" in prompt:
                processed_prompt, weight = prompt.split(":")
                weight = float(weight)
            else:
                processed_prompt = prompt
                weight = 1.0
            processed_prompts.append(processed_prompt)
            weights.append(weight)
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(weights, device=self.device),
        }
    def generate(
        self,
        pos_prompts,
        neg_prompts=None,
        image_path=None,
        show_intermediate=True,
        save_intermediate=False,
        show_final=True,
        save_final=True,
        save_path=None,
    ):
        """Make the latent vector and optimize the CLIP loss over it."""
        if image_path:
            self.latent = self._get_latent(image_path)
        else:
            self.latent = torch.randn(self.latent_dim, device=self.device)
        if self.log:
            self._init_logging(pos_prompts, neg_prompts, image_path)

        assert pos_prompts, "You must provide at least one positive prompt."
        pos_prompts = self.process_prompts(pos_prompts)
        neg_prompts = self.process_prompts(neg_prompts)
        if save_final and save_path is None:
            save_path = os.path.join("./outputs/", "_".join(pos_prompts["prompts"]))
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            else:
                save_path = save_path + "_" + get_timestamp()
                os.makedirs(save_path)
            self.save_path = save_path

        original_img = self.vqgan.decode(self.latent)[0]
        if show_intermediate:
            print("Original Image")
            show_pil(custom_to_pil(original_img))

        original_img = loop_post_process(original_img)
        for iter, transformed_img in enumerate(self._optimize_CLIP(original_img, pos_prompts, neg_prompts)):
            if show_intermediate:
                show_pil(transformed_img)
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png"))
            if self.log:
                wandb.log({"Image": wandb.Image(transformed_img)})
        if show_final:
            show_pil(transformed_img)
        if save_final:
            transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png"))
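# A hedged usage sketch (prompts and image path are hypothetical; the VQGAN and
# CLIP weights are resolved by get_device/load_vqgan via the constructor defaults):
#
#   editor = VQGAN_CLIP(iterations=25, lr=0.01)
#   editor.generate(
#       pos_prompts="a smiling face:1.0|bright lighting:0.5",
#       neg_prompts="glasses:0.5",
#       image_path="face.png",
#   )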
| 28
| 0
|
import sys
N = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def str_eval(s: str) -> int:
    """
    Return the product of the digits in the string s.
    """
    product = 1
    for digit in s:
        product *= int(digit)
    return product
def solution(n: str = N) -> int:
    """
    Find the thirteen adjacent digits in n with the greatest product.
    """
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
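# Worked example: str_eval("73167") == 7 * 3 * 1 * 6 * 7 == 882; solution()
# slides a 13-digit window over N and keeps the largest such digit product.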
if __name__ == "__main__":
print(f'{solution() = }')
| 702
|
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class UtilsFunctionsTest(unittest.TestCase):
'''simple docstring'''
    def test_top_k_top_p_filtering(self):
        logits = tf.convert_to_tensor(
[
[
8.2220991, # 3rd highest value; idx. 0
-0.5620044,
5.23229752,
4.0386393,
-6.8798378,
-0.54785802,
-3.2012153,
2.92777176,
1.88171953,
7.35341276, # 5th highest value; idx. 9
8.43207833, # 2nd highest value; idx. 10
-9.85711836,
-5.96209236,
-1.13039161,
-7.1115294,
-0.8369633,
-5.3186408,
7.06427407,
0.81369344,
-0.82023817,
-5.9179796,
0.58813443,
-6.99778438,
4.71551189,
-0.18771637,
7.44020759, # 4th highest value; idx. 25
9.38450987, # 1st highest value; idx. 26
2.12662941,
-9.32562038,
2.35652522,
], # cummulative prob of 5 highest values <= 0.6
[
0.58425518,
4.53139238,
-5.57510464,
-6.28030699,
-7.19529503,
-4.02122551,
1.39337037,
-6.06707057,
1.59480517,
-9.643119,
0.03907799,
0.67231762,
-8.88206726,
6.27115922, # 4th highest value; idx. 13
2.28520723,
4.82767506,
4.30421368,
8.8275313, # 2nd highest value; idx. 17
5.44029958, # 5th highest value; idx. 18
-4.4735794,
7.38579536, # 3rd highest value; idx. 20
-2.91051663,
2.61946077,
-2.5674762,
-9.48959302,
-4.02922645,
-1.35416918,
9.67702323, # 1st highest value; idx. 27
-5.89478553,
1.85370467,
], # cummulative prob of 5 highest values <= 0.6
            ],
            dtype=tf.float32,
        )

        non_inf_expected_idx = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]],
            dtype=tf.int32,
        )  # expected non filtered idx as noted above

        non_inf_expected_output = tf.convert_to_tensor(
            [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023],
            dtype=tf.float32,
        )  # expected non filtered values as noted above

        output = tf_top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4)

        non_inf_output = output[output != -float("inf")]
        non_inf_idx = tf.cast(
            tf.where(tf.not_equal(output, tf.constant(-float("inf"), dtype=tf.float32))),
            dtype=tf.int32,
        )

        tf.debugging.assert_near(non_inf_output, non_inf_expected_output, rtol=1e-12)
        tf.debugging.assert_equal(non_inf_idx, non_inf_expected_idx)
@require_tf
class TFGenerationIntegrationTests(unittest.TestCase, GenerationIntegrationTestsMixin):
'''simple docstring'''
if is_tf_available():
        framework_dependent_parameters = {
'''AutoModelForCausalLM''': TFAutoModelForCausalLM,
'''AutoModelForSpeechSeq2Seq''': TFAutoModelForSpeechSeqaSeq,
'''AutoModelForSeq2SeqLM''': TFAutoModelForSeqaSeqLM,
'''AutoModelForVision2Seq''': TFAutoModelForVisionaSeq,
'''LogitsProcessorList''': TFLogitsProcessorList,
'''MinLengthLogitsProcessor''': TFMinLengthLogitsProcessor,
'''create_tensor_fn''': tf.convert_to_tensor,
'''floats_tensor''': floats_tensor,
'''return_tensors''': '''tf''',
}
@slow
    def test_generate_tf_function_export_fixed_input_length(self):
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        input_length = 2
        max_new_tokens = 2
        class DummyModel(tf.Module):
'''simple docstring'''
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model
@tf.function(
input_signature=(
                    tf.TensorSpec((None, input_length), tf.int32, name="input_ids"),
                    tf.TensorSpec((None, input_length), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}
        dummy_input_ids = [[2, 0], [102, 103]]
        dummy_attention_masks = [[1, 0], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            for batch_size in range(1, len(dummy_input_ids) + 1):
                inputs = {
                    "input_ids": tf.constant(dummy_input_ids[:batch_size]),
                    "attention_mask": tf.constant(dummy_attention_masks[:batch_size]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)
@slow
    def test_generate_tf_function_export_fixed_batch_size(self):
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        batch_size = 1
        max_new_tokens = 2
        class DummyModel(tf.Module):
'''simple docstring'''
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model
@tf.function(
input_signature=(
                    tf.TensorSpec((batch_size, None), tf.int32, name="input_ids"),
                    tf.TensorSpec((batch_size, None), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}
        dummy_input_ids = [[2], [102, 103]]
        dummy_attention_masks = [[1], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            for input_row in range(len(dummy_input_ids)):
                inputs = {
                    "input_ids": tf.constant([dummy_input_ids[input_row]]),
                    "attention_mask": tf.constant([dummy_attention_masks[input_row]]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)
@slow
@require_tensorflow_text
    def test_generate_tf_function_export_with_tf_tokenizer(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            # file needed to load the TF tokenizer
            hf_hub_download(repo_id="google/flan-t5-small", filename="spiece.model", local_dir=tmp_dir)

            class CompleteSentenceTransformer(tf.keras.layers.Layer):
                def __init__(self):
                    super().__init__()
                    self.tokenizer = text.SentencepieceTokenizer(
                        model=tf.io.gfile.GFile(os.path.join(tmp_dir, "spiece.model"), "rb").read()
                    )
                    self.model = TFAutoModelForSeqaSeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")

                def call(self, inputs, *args, **kwargs):
                    tokens = self.tokenizer.tokenize(inputs)
                    input_ids, attention_mask = text.pad_model_inputs(
                        tokens, max_seq_length=64, pad_value=self.model.config.pad_token_id
                    )
                    outputs = self.model.generate(input_ids=input_ids, attention_mask=attention_mask)
                    return self.tokenizer.detokenize(outputs)

            complete_model = CompleteSentenceTransformer()
            inputs = tf.keras.layers.Input(shape=(1,), dtype=tf.string, name="inputs")
            outputs = complete_model(inputs)
            keras_model = tf.keras.Model(inputs, outputs)
            keras_model.save(tmp_dir)
    def test_eos_token_id_int_and_list_top_k_top_sampling(self):
        generation_kwargs = {
            "do_sample": True,
            "num_beams": 1,
            "top_p": 0.7,
            "top_k": 10,
            "temperature": 0.7,
        }
        expectation = 14

        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        sentence = "Hello, my dog is cute and"
        tokens = tokenizer(sentence, return_tensors="tf")
        model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")

        eos_token_id = 638
        # forces the generation to happen on CPU, to avoid GPU-related quirks
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))

        eos_token_id = [638, 198]
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))
    def test_model_kwarg_encoder_signature_filtering(self):
        bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
        article = "Hugging Face is a technology company based in New York and Paris."
        input_ids = bart_tokenizer(article, return_tensors="tf").input_ids
        bart_model = TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart")
        output = bart_model.generate(input_ids).numpy()

        # A fake model whose call() accepts an extra "foo" argument: generation should
        # filter it out of the encoder kwargs and produce the same output.
        class FakeBart(TFBartForConditionalGeneration):
            def call(self, input_ids, foo=None, **kwargs):
                return super().call(input_ids, **kwargs)

        bart_model = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart")
        fake_output = bart_model.generate(input_ids, foo="bar").numpy()
        self.assertTrue(np.array_equal(output, fake_output))

        class FakeEncoder(bart_model.model.encoder.__class__):
            def call(self, input_ids, **kwargs):
                return super().call(input_ids, **kwargs)

        fake_encoder = FakeEncoder(bart_model.config, bart_model.model.shared)
        bart_model.model.encoder = fake_encoder

        # Normal generation still works (the output will be different because the encoder weights are different)
        fake_output = bart_model.generate(input_ids).numpy()
        with self.assertRaises(ValueError):
            # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
            bart_model.generate(input_ids, foo="bar")
| 605
| 0
|
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    """Calculate the entropy of the softmax distribution implied by logits x."""
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
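# Worked example: for uniform logits, entropy(torch.zeros(1, k)) == ln(k);
# with k = 4, exp_x is all ones, A = 4 and B = 0, giving ln(4) ≈ 1.386.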
class DeeBertEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states

        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])

        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]
    def set_early_exit_entropy(self, x):
        if (type(x) is float) or (type(x) is int):
            for i in range(len(self.early_exit_entropy)):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x
    def init_highway_pooler(self, pooler):
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name])
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_outputs = layer_module(
                hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask
            )
            hidden_states = layer_outputs[0]

            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)

            highway_exit = self.highway[i](current_outputs)
            # logits, pooled_output

            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits)
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)

                if highway_entropy < self.early_exit_entropy[i]:
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output, i + 1)
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)

        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)

        outputs = outputs + (all_highway_exits,)

        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). ",
    BERT_START_DOCSTRING,
)
class DeeBertModel(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)

        self.init_weights()

    def init_highway_pooler(self):
        self.encoder.init_highway_pooler(self.pooler)
    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]

        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters()).dtype
        )  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[
            1:
        ]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
    def __init__(self, message, exit_layer):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!
class BertHighway(nn.Module):
    """A highway (early-exit) head: BertPooler plus a classifier on an intermediate layer's output."""

    def __init__(self, config):
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_outputs):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output

        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output

        # Dropout and classification
        pooled_output = bmodel_output[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        return logits, pooled_output
@add_start_docstrings(
    """Bert Model (with early exiting - DeeBERT) with a classifier on top,
    also takes care of multi-layer training. """,
    BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

        self.init_weights()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                #  We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    #  We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 112
|
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("""--dpm""", action="""store_true""", help="""Enable DPMSolver or not""")
parser.add_argument("""--steps""", default=None, type=int, help="""Num inference steps""")
SCREAMING_SNAKE_CASE__ : Tuple = parser.parse_args()
SCREAMING_SNAKE_CASE__ : int = """cpu"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = """a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"""
SCREAMING_SNAKE_CASE__ : List[Any] = """path-to-your-trained-model"""
SCREAMING_SNAKE_CASE__ : Tuple = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
SCREAMING_SNAKE_CASE__ : Optional[int] = pipe.to(device)
# to channels last
SCREAMING_SNAKE_CASE__ : int = pipe.unet.to(memory_format=torch.channels_last)
SCREAMING_SNAKE_CASE__ : List[Any] = pipe.vae.to(memory_format=torch.channels_last)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
SCREAMING_SNAKE_CASE__ : List[str] = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.randn(2, 4, 6_4, 6_4)
SCREAMING_SNAKE_CASE__ : Any = torch.rand(1) * 9_9_9
SCREAMING_SNAKE_CASE__ : List[Any] = torch.randn(2, 7_7, 7_6_8)
SCREAMING_SNAKE_CASE__ : Optional[Any] = (sample, timestep, encoder_hidden_status)
try:
SCREAMING_SNAKE_CASE__ : Any = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
SCREAMING_SNAKE_CASE__ : Optional[Any] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
SCREAMING_SNAKE_CASE__ : str = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
SCREAMING_SNAKE_CASE__ : Optional[Any] = 6_6_6
SCREAMING_SNAKE_CASE__ : Any = torch.Generator(device).manual_seed(seed)
SCREAMING_SNAKE_CASE__ : int = {"""generator""": generator}
if args.steps is not None:
SCREAMING_SNAKE_CASE__ : Optional[int] = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
SCREAMING_SNAKE_CASE__ : Dict = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save("""generated.png""")
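
# A minimal invocation sketch (hypothetical paths/flags; assumes the script above
# is saved as `inference_bf16.py` and `model_id` points at a trained pipeline):
#     python inference_bf16.py --dpm --steps 20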
| 112
| 1
|
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
lowerCAmelCase_: List[Any] = logging.get_logger(__name__)
lowerCAmelCase_: List[Any] = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"adapter_layer": "encoder.layers.*.adapter_layer",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
"pooling_layer.linear": "projector",
"pooling_layer.projection": "classifier",
}
lowerCAmelCase_: Tuple = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"projector",
"classifier",
]
def __a ( A ):
'''simple docstring'''
lowercase__ = {}
with open(_lowerCamelCase , "r" ) as file:
for line_number, line in enumerate(_lowerCamelCase ):
lowercase__ = line.strip()
if line:
lowercase__ = line.split()
lowercase__ = line_number
lowercase__ = words[0]
lowercase__ = value
return result
def __a ( A , A , A , A , A ):
'''simple docstring'''
for attribute in key.split("." ):
lowercase__ = getattr(_lowerCamelCase , _lowerCamelCase )
lowercase__ = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_lowerCamelCase ):
lowercase__ = PARAM_MAPPING[full_name.split("." )[-1]]
lowercase__ = 'param'
if weight_type is not None and weight_type != "param":
lowercase__ = getattr(_lowerCamelCase , _lowerCamelCase ).shape
elif weight_type is not None and weight_type == "param":
lowercase__ = hf_pointer
for attribute in hf_param_name.split("." ):
lowercase__ = getattr(_lowerCamelCase , _lowerCamelCase )
lowercase__ = shape_pointer.shape
# let's reduce dimension
lowercase__ = value[0]
else:
lowercase__ = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
lowercase__ = value
elif weight_type == "weight_g":
lowercase__ = value
elif weight_type == "weight_v":
lowercase__ = value
elif weight_type == "bias":
lowercase__ = value
elif weight_type == "param":
for attribute in hf_param_name.split("." ):
lowercase__ = getattr(_lowerCamelCase , _lowerCamelCase )
lowercase__ = value
else:
lowercase__ = value
logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def __a ( A , A , A , A , A ):
'''simple docstring'''
lowercase__ = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_lowerCamelCase ):
lowercase__ = PARAM_MAPPING[full_name.split("." )[-1]]
lowercase__ = 'param'
if weight_type is not None and weight_type != "param":
lowercase__ = '.'.join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
lowercase__ = '.'.join([key, hf_param_name] )
else:
lowercase__ = key
lowercase__ = value if 'lm_head' in full_key else value[0]
lowerCAmelCase_: List[Any] = {
"W_a": "linear_1.weight",
"W_b": "linear_2.weight",
"b_a": "linear_1.bias",
"b_b": "linear_2.bias",
"ln_W": "norm.weight",
"ln_b": "norm.bias",
}
def __a ( A , A , A=None , A=None ):
'''simple docstring'''
lowercase__ = False
for key, mapped_key in MAPPING.items():
lowercase__ = 'wav2vec2.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
lowercase__ = True
if "*" in mapped_key:
lowercase__ = name.split(_lowerCamelCase )[0].split("." )[-2]
lowercase__ = mapped_key.replace("*" , _lowerCamelCase )
if "weight_g" in name:
lowercase__ = 'weight_g'
elif "weight_v" in name:
lowercase__ = 'weight_v'
elif "bias" in name:
lowercase__ = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowercase__ = 'weight'
else:
lowercase__ = None
if hf_dict is not None:
rename_dict(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
else:
set_recursively(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
return is_used
return is_used
def __a ( A , A , A ):
'''simple docstring'''
lowercase__ = []
lowercase__ = fairseq_model.state_dict()
lowercase__ = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
lowercase__ = False
if "conv_layers" in name:
load_conv_layer(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , hf_model.config.feat_extract_norm == "group" , )
lowercase__ = True
else:
lowercase__ = load_wavaveca_layer(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if not is_used:
unused_weights.append(_lowerCamelCase )
logger.warning(f'''Unused weights: {unused_weights}''' )
def __a ( A , A , A , A , A ):
'''simple docstring'''
lowercase__ = full_name.split("conv_layers." )[-1]
lowercase__ = name.split("." )
lowercase__ = int(items[0] )
lowercase__ = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
lowercase__ = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
lowercase__ = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
lowercase__ = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
lowercase__ = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(_lowerCamelCase )
@torch.no_grad()
def __a ( A , A , A=None , A=None , A=True , A=False ):
'''simple docstring'''
if config_path is not None:
lowercase__ = WavaVecaConfig.from_pretrained(_lowerCamelCase )
else:
lowercase__ = WavaVecaConfig()
if is_seq_class:
lowercase__ = read_txt_into_dict(_lowerCamelCase )
lowercase__ = idalabel
lowercase__ = WavaVecaForSequenceClassification(_lowerCamelCase )
lowercase__ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=_lowerCamelCase , return_attention_mask=_lowerCamelCase , )
feature_extractor.save_pretrained(_lowerCamelCase )
elif is_finetuned:
if dict_path:
lowercase__ = Dictionary.load(_lowerCamelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
lowercase__ = target_dict.pad_index
lowercase__ = target_dict.bos_index
lowercase__ = target_dict.eos_index
lowercase__ = len(target_dict.symbols )
lowercase__ = os.path.join(_lowerCamelCase , "vocab.json" )
if not os.path.isdir(_lowerCamelCase ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(_lowerCamelCase ) )
return
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
lowercase__ = target_dict.indices
# fairseq has the <pad> and <s> switched
lowercase__ = 0
lowercase__ = 1
with open(_lowerCamelCase , "w" , encoding="utf-8" ) as vocab_handle:
json.dump(_lowerCamelCase , _lowerCamelCase )
lowercase__ = WavaVecaCTCTokenizer(
_lowerCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=_lowerCamelCase , )
lowercase__ = True if config.feat_extract_norm == 'layer' else False
lowercase__ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=_lowerCamelCase , return_attention_mask=_lowerCamelCase , )
lowercase__ = WavaVecaProcessor(feature_extractor=_lowerCamelCase , tokenizer=_lowerCamelCase )
processor.save_pretrained(_lowerCamelCase )
lowercase__ = WavaVecaForCTC(_lowerCamelCase )
else:
lowercase__ = WavaVecaForPreTraining(_lowerCamelCase )
if is_finetuned or is_seq_class:
lowercase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
else:
lowercase__ = argparse.Namespace(task="audio_pretraining" )
lowercase__ = fairseq.tasks.setup_task(_lowerCamelCase )
lowercase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_lowerCamelCase )
lowercase__ = model[0].eval()
recursively_load_weights(_lowerCamelCase , _lowerCamelCase , not is_finetuned )
hf_wavavec.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
lowerCAmelCase_: int = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
parser.add_argument(
"--is_seq_class",
action="store_true",
help="Whether the model to convert is a fine-tuned sequence classification model or not",
)
lowerCAmelCase_: List[Any] = parser.parse_args()
lowerCAmelCase_: List[Any] = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
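
# Example invocation (hypothetical paths; assumes a fine-tuned fairseq CTC
# checkpoint plus its fairseq dictionary file):
#     python convert_wav2vec2_checkpoint.py \
#         --checkpoint_path wav2vec_small_960h.pt \
#         --dict_path dict.ltr.txt \
#         --pytorch_dump_folder_path ./wav2vec2-base-960h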
| 716
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_: str = logging.get_logger(__name__)
lowerCAmelCase_: List[Any] = {
"facebook/data2vec-vision-base-ft": (
"https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
),
}
class a__ ( _a ):
snake_case_ = "data2vec-vision"
def __init__( self, _UpperCAmelCase=768, _UpperCAmelCase=12, _UpperCAmelCase=12, _UpperCAmelCase=3072, _UpperCAmelCase="gelu", _UpperCAmelCase=0.0, _UpperCAmelCase=0.0, _UpperCAmelCase=0.02, _UpperCAmelCase=1E-12, _UpperCAmelCase=224, _UpperCAmelCase=16, _UpperCAmelCase=3, _UpperCAmelCase=False, _UpperCAmelCase=False, _UpperCAmelCase=False, _UpperCAmelCase=False, _UpperCAmelCase=0.1, _UpperCAmelCase=0.1, _UpperCAmelCase=True, _UpperCAmelCase=[3, 5, 7, 11], _UpperCAmelCase=[1, 2, 3, 6], _UpperCAmelCase=True, _UpperCAmelCase=0.4, _UpperCAmelCase=256, _UpperCAmelCase=1, _UpperCAmelCase=False, _UpperCAmelCase=255, **_UpperCAmelCase, ):
'''simple docstring'''
super().__init__(**_UpperCAmelCase )
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = use_mask_token
lowercase__ = use_absolute_position_embeddings
lowercase__ = use_relative_position_bias
lowercase__ = use_shared_relative_position_bias
lowercase__ = layer_scale_init_value
lowercase__ = drop_path_rate
lowercase__ = use_mean_pooling
# decode head attributes (semantic segmentation)
lowercase__ = out_indices
lowercase__ = pool_scales
# auxiliary head attributes (semantic segmentation)
lowercase__ = use_auxiliary_head
lowercase__ = auxiliary_loss_weight
lowercase__ = auxiliary_channels
lowercase__ = auxiliary_num_convs
lowercase__ = auxiliary_concat_input
lowercase__ = semantic_loss_ignore_index
class a__ ( _a ):
snake_case_ = version.parse("1.11" )
@property
def snake_case__ ( self ):
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def snake_case__ ( self ):
'''simple docstring'''
return 1E-4
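
# Minimal usage sketch (the defaults above reproduce the base architecture; any
# keyword can be overridden):
#     config = Data2VecVisionConfig(image_size=384, drop_path_rate=0.2)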
| 668
| 0
|
"""LLaMA model configuration"""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
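
# Minimal usage sketch (hypothetical values; the dict must pass
# `_rope_scaling_validation` above, i.e. exactly the keys `type` and `factor`):
#     config = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})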
| 640
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase_ : Any = {
'''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''],
'''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''],
'''processing_mctct''': ['''MCTCTProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[str] = [
'''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MCTCTForCTC''',
'''MCTCTModel''',
'''MCTCTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
UpperCAmelCase_ : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 255
| 0
|
"""simple docstring"""
from torch import nn
def A__ ( A__ ) -> Optional[int]:
'''simple docstring'''
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(F"""Unsupported activation function: {act_fn}""" )
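
# Usage sketch:
#     act = get_activation("gelu")   # -> nn.GELU()
#     act = get_activation("swish")  # -> nn.SiLU() ("swish" and "silu" are aliases here)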
| 579
|
"""simple docstring"""
import datasets
SCREAMING_SNAKE_CASE_ = '''\
@InProceedings{conneau2018xnli,
author = "Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin",
title = "XNLI: Evaluating Cross-lingual Sentence Representations",
booktitle = "Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing",
year = "2018",
publisher = "Association for Computational Linguistics",
location = "Brussels, Belgium",
}
'''
SCREAMING_SNAKE_CASE_ = '''\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into a 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
'''
SCREAMING_SNAKE_CASE_ = '''
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
\'accuracy\': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric("xnli")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
'''
def A__ ( A__ , A__ ) -> Tuple:
'''simple docstring'''
return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
"""simple docstring"""
def __A ( self ) -> List[str]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ),
"references": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ),
} ) , codebase_urls=[] , reference_urls=[] , format="numpy" , )
def __A ( self , snake_case_ , snake_case_ ) -> Dict:
return {"accuracy": simple_accuracy(snake_case_ , snake_case_ )}
| 579
| 1
|
import argparse
import re

import requests
import torch

# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode

from transformers import (
    BertTokenizer,
    BlipConfig,
    BlipForConditionalGeneration,
    BlipForImageTextRetrieval,
    BlipForQuestionAnswering,
)


def load_demo_image(image_size, device):
    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image


def rename_key(key):
    if "visual_encoder" in key:
        key = re.sub("visual_encoder*", "vision_model.encoder", key)
    if "blocks" in key:
        key = re.sub(r"blocks", "layers", key)
    if "attn" in key:
        key = re.sub(r"attn", "self_attn", key)
    if "norm1" in key:
        key = re.sub(r"norm1", "layer_norm1", key)
    if "norm2" in key:
        key = re.sub(r"norm2", "layer_norm2", key)
    if "encoder.norm" in key:
        key = re.sub(r"encoder.norm", "post_layernorm", key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key)
    if "encoder.pos_embed" in key:
        key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key)
    if "encoder.cls_token" in key:
        key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key)
    if "self_attn" in key:
        key = re.sub(r"self_attn.proj", "self_attn.projection", key)

    return key


@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})

    hf_model = BlipForConditionalGeneration(config).eval()

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"

    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit="base")
    pt_model = pt_model.eval()

    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_model.load_state_dict(modified_state_dict)

    image_size = 384
    image = load_demo_image(image_size=image_size, device="cpu")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    input_ids = tokenizer(["a picture of"]).input_ids

    out = hf_model.generate(image, input_ids)

    assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    out = hf_model.generate(image)

    assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)

    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
    )

    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit="base")
    vqa_model.eval()

    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_vqa_model = BlipForQuestionAnswering(config)

    hf_vqa_model.load_state_dict(modified_state_dict)

    question = ["How many dogs are in this image?"]
    question_input_ids = tokenizer(question, return_tensors="pt").input_ids

    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))

    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa")

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"

    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit="base")
    itm_model.eval()

    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_itm_model = BlipForImageTextRetrieval(config)

    question = ["A picture of a woman with a dog sitting in a beach"]
    question_input_ids = tokenizer(
        question,
        return_tensors="pt",
        padding="max_length",
        truncation=True,
        max_length=35,
    ).input_ids

    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()

    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)

    assert out[0].item() == 0.2110687494277954
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127

    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
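
# Example invocation (hypothetical script name; the BLIP checkpoints are
# downloaded from the URLs hard-coded above):
#     python convert_blip_original_pytorch_to_hf.py --pytorch_dump_folder_path ./blip-base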
| 3
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class lowercase ( metaclass=lowercase__ ):
lowercase = ['''flax''', '''transformers''']
def __init__(self : List[Any] ,*SCREAMING_SNAKE_CASE_ : Union[str, Any] ,**SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> str:
"""simple docstring"""
requires_backends(self ,['''flax''', '''transformers'''] )
@classmethod
def UpperCAmelCase (cls : List[Any] ,*SCREAMING_SNAKE_CASE_ : List[Any] ,**SCREAMING_SNAKE_CASE_ : int ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls ,['''flax''', '''transformers'''] )
@classmethod
def UpperCAmelCase (cls : List[Any] ,*SCREAMING_SNAKE_CASE_ : str ,**SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls ,['''flax''', '''transformers'''] )
class lowercase ( metaclass=lowercase__ ):
lowercase = ['''flax''', '''transformers''']
def __init__(self : int ,*SCREAMING_SNAKE_CASE_ : List[Any] ,**SCREAMING_SNAKE_CASE_ : Any ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self ,['''flax''', '''transformers'''] )
@classmethod
def UpperCAmelCase (cls : int ,*SCREAMING_SNAKE_CASE_ : str ,**SCREAMING_SNAKE_CASE_ : int ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls ,['''flax''', '''transformers'''] )
@classmethod
def UpperCAmelCase (cls : List[str] ,*SCREAMING_SNAKE_CASE_ : List[str] ,**SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Dict:
"""simple docstring"""
requires_backends(cls ,['''flax''', '''transformers'''] )
class lowercase ( metaclass=lowercase__ ):
lowercase = ['''flax''', '''transformers''']
def __init__(self : Tuple ,*SCREAMING_SNAKE_CASE_ : Optional[int] ,**SCREAMING_SNAKE_CASE_ : Dict ) -> List[str]:
"""simple docstring"""
requires_backends(self ,['''flax''', '''transformers'''] )
@classmethod
def UpperCAmelCase (cls : Tuple ,*SCREAMING_SNAKE_CASE_ : Dict ,**SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> List[Any]:
"""simple docstring"""
requires_backends(cls ,['''flax''', '''transformers'''] )
@classmethod
def UpperCAmelCase (cls : Tuple ,*SCREAMING_SNAKE_CASE_ : Tuple ,**SCREAMING_SNAKE_CASE_ : Tuple ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls ,['''flax''', '''transformers'''] )
class lowercase ( metaclass=lowercase__ ):
lowercase = ['''flax''', '''transformers''']
def __init__(self : Optional[Any] ,*SCREAMING_SNAKE_CASE_ : Any ,**SCREAMING_SNAKE_CASE_ : str ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self ,['''flax''', '''transformers'''] )
@classmethod
def UpperCAmelCase (cls : List[Any] ,*SCREAMING_SNAKE_CASE_ : Optional[Any] ,**SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls ,['''flax''', '''transformers'''] )
@classmethod
def UpperCAmelCase (cls : List[Any] ,*SCREAMING_SNAKE_CASE_ : List[Any] ,**SCREAMING_SNAKE_CASE_ : List[Any] ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls ,['''flax''', '''transformers'''] )
| 535
| 0
|
import unittest

import torch
from torch import nn

from diffusers.models.activations import get_activation


class ActivationsTests(unittest.TestCase):
    def test_swish(self):
        act = get_activation("swish")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_silu(self):
        act = get_activation("silu")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_mish(self):
        act = get_activation("mish")

        self.assertIsInstance(act, nn.Mish)

        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_gelu(self):
        act = get_activation("gelu")

        self.assertIsInstance(act, nn.GELU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
| 720
|
from math import sqrt


def is_prime(number: int) -> bool:
    """
    input: positive integer 'number'
    returns True if 'number' is prime, otherwise False.
    """
    assert isinstance(number, int) and (number >= 0), "'number' must be an int and positive"

    status = True

    # 0 and 1 are not primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must be a bool"

    return status


def sieve_er(n):
    """
    Sieve of Eratosthenes: returns all primes from 2 up to n.
    """
    assert isinstance(n, int) and (n > 2), "'n' must be an int and > 2"

    # begin_list: contains all natural numbers from 2 up to n
    begin_list = list(range(2, n + 1))

    ans = []  # this list will be returned.

    # actual sieve of Eratosthenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]

    # precondition
    assert isinstance(ans, list), "'ans' must be a list"

    return ans


def get_prime_numbers(n):
    """
    Returns all primes between 2 and n by trial division.
    """
    assert isinstance(n, int) and (n > 2), "'n' must be an int and > 2"

    ans = []

    # iterates over all numbers between 2 up to n+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must be a list"

    return ans


def prime_factorization(number):
    """
    Returns the prime factorization of 'number' as a list.
    """
    assert isinstance(number, int) and number >= 0, "'number' must be an int and >= 0"

    ans = []  # this list will be returned by the function.

    # potential prime number factors.
    factor = 2
    quotient = number

    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must be a list"

    return ans


def greatest_prime_factor(number):
    """
    Returns the greatest prime factor of 'number'.
    """
    assert isinstance(number, int) and (number >= 0), "'number' must be an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)

    ans = max(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must be an int"

    return ans


def smallest_prime_factor(number):
    """
    Returns the smallest prime factor of 'number'.
    """
    assert isinstance(number, int) and (number >= 0), "'number' must be an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)

    ans = min(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must be an int"

    return ans


def is_even(number):
    assert isinstance(number, int), "'number' must be an int"
    assert isinstance(number % 2 == 0, bool), "comparison must be a bool"

    return number % 2 == 0


def is_odd(number):
    assert isinstance(number, int), "'number' must be an int"
    assert isinstance(number % 2 != 0, bool), "comparison must be a bool"

    return number % 2 != 0


def goldbach(number):
    """
    Goldbach's assumption: returns two primes whose sum equals the even 'number'.
    """
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must be an int, even and > 2"

    ans = []  # this list will be returned

    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)

    # run variable for while-loops.
    i = 0
    j = None

    # exit variable, for breaking out of the loops
    loop = True

    while i < len_pn and loop:
        j = i + 1

        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])

            j += 1

        i += 1

    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contain two primes whose sum equals 'number'"

    return ans


def gcd(number1, number2):
    """
    Euclidean algorithm: greatest common divisor of 'number1' and 'number2'.
    """
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must be positive integers."

    rest = 0

    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest

    # precondition
    assert isinstance(number1, int) and (number1 >= 0), "'number1' must be an int and positive"

    return number1


def kg_v(number1, number2):
    """
    Least common multiple ('kgV') of 'number1' and 'number2'.
    """
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must be positive integers."

    ans = 1  # actual answer that will be returned.

    # for kgV (x, 1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)

    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)

    count1 = 0
    count2 = 0

    done = []  # captures numbers in both 'prime_fac_1' and 'prime_fac_2'

    # iterates through prime_fac_1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)

                for _ in range(max(count1, count2)):
                    ans *= n

            else:
                count1 = prime_fac_1.count(n)

                for _ in range(count1):
                    ans *= n

            done.append(n)

    # iterates through prime_fac_2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)

            for _ in range(count2):
                ans *= n

            done.append(n)

    # precondition
    assert isinstance(ans, int) and (ans >= 0), "'ans' must be an int and positive"

    return ans


def get_prime(n):
    """
    Returns the n-th prime number (get_prime(0) == 2).
    """
    assert isinstance(n, int) and (n >= 0), "'n' must be a positive int"

    index = 0
    ans = 2  # this variable holds the answer

    while index < n:
        index += 1

        ans += 1  # counts to the next number

        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1

    # precondition
    assert isinstance(ans, int) and is_prime(ans), "'ans' must be a prime number and an int"

    return ans


def get_primes_between(p_number_1, p_number_2):
    """
    Returns all primes strictly between the primes 'p_number_1' and 'p_number_2'.
    """
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must be prime numbers and 'p_number_1' < 'p_number_2'"

    number = p_number_1 + 1  # jump to the next number

    ans = []  # this list will be returned.

    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1

    while number < p_number_2:
        ans.append(number)

        number += 1

        # fetch the next prime number.
        while not is_prime(number):
            number += 1

    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must be a list without the arguments"

    # 'ans' contains neither 'p_number_1' nor 'p_number_2'!
    return ans


def get_divisors(n):
    """
    Returns all divisors of 'n' (inclusive 1 and 'n').
    """
    assert isinstance(n, int) and (n >= 1), "'n' must be an int and >= 1"

    ans = []  # will be returned.

    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)

    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function get_divisors(...)"

    return ans


def is_perfect_number(number):
    """
    Returns True if 'number' equals the sum of its proper divisors.
    """
    assert isinstance(number, int) and (number > 1), "'number' must be an int and > 1"

    divisors = get_divisors(number)

    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function get_divisors(...)"

    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number


def simplify_fraction(numerator, denominator):
    """
    Reduces a fraction to lowest terms, returned as a (numerator, denominator) tuple.
    """
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must be of type int and 'denominator' != 0"

    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))

    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"

    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def factorial(n):
    """
    Returns n! (the factorial of 'n').
    """
    assert isinstance(n, int) and (n >= 0), "'n' must be an int and >= 0"

    ans = 1  # this will be returned.

    for factor in range(1, n + 1):
        ans *= factor

    return ans


def fib(n):
    """
    Returns the n-th Fibonacci number (fib(0) == 1 in this convention).
    """
    assert isinstance(n, int) and (n >= 0), "'n' must be an int and >= 0"

    tmp = 0
    fib_1 = 1
    ans = 1  # this will be returned

    for _ in range(n - 1):
        tmp = ans
        ans += fib_1
        fib_1 = tmp

    return ans
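
# Quick sanity checks (values follow directly from the definitions above):
#     is_prime(97)             -> True
#     prime_factorization(60)  -> [2, 2, 3, 5]
#     goldbach(28)             -> [5, 23]
#     kg_v(24, 36)             -> 72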
| 123
| 0
|
from __future__ import annotations


def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """
    Find the maximum sum of non-adjacent elements in the input list.

    >>> maximum_non_adjacent_sum([1, 2, 3])
    4
    >>> maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6])
    18
    >>> maximum_non_adjacent_sum([])
    0
    """
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 606
|
import argparse

import torch

from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
    ["attention", "attn"],
    ["encoder_attention", "encoder_attn"],
    ["q_lin", "q_proj"],
    ["k_lin", "k_proj"],
    ["v_lin", "v_proj"],
    ["out_lin", "out_proj"],
    ["norm_embeddings", "layernorm_embedding"],
    ["position_embeddings", "embed_positions"],
    ["embeddings", "embed_tokens"],
    ["ffn.lin", "fc"],
]


def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k


def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ["START"]


@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue

        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
    parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
    parser.add_argument(
        "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
    )
    args = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
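
# Example invocation (hypothetical paths, mirroring the argparse defaults above):
#     python convert_blenderbot_checkpoint.py \
#         --src_path blenderbot-model.bin --save_dir ./hf_blenderbot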
| 452
| 0
|
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_informer": [
        "INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_informer"] = [
        "INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InformerForPrediction",
        "InformerModel",
        "InformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_informer import (
            INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            InformerForPrediction,
            InformerModel,
            InformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 691
|
"""Wav2Vec2 model configuration"""

import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
    # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}


class Wav2Vec2Config(PretrainedConfig):
    model_type = "wav2vec2"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        adapter_attn_dim=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
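
# Usage sketch: `inputs_to_logits_ratio` is the product of the conv strides,
# i.e. 5 * 2**6 = 320 input samples per output frame with the defaults.
#     config = Wav2Vec2Config()
#     assert config.inputs_to_logits_ratio == 320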
| 691
| 1
|
"""simple docstring"""
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
UpperCAmelCase = {
"""vocab_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
},
"""merges_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
},
"""tokenizer_config_file""": {
"""facebook/blenderbot_small-90M""": (
"""https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
)
},
}
UpperCAmelCase = {"""facebook/blenderbot_small-90M""": 512}
def _snake_case ( __snake_case : Tuple ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = set()
_lowerCamelCase : Any = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_lowerCamelCase : Tuple = char
_lowerCamelCase : Tuple = set(__snake_case )
return pairs
class lowercase__ ( A_ ):
__UpperCAmelCase = VOCAB_FILES_NAMES
__UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase = ['''input_ids''', '''attention_mask''']
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE="__start__" , SCREAMING_SNAKE_CASE="__end__" , SCREAMING_SNAKE_CASE="__unk__" , SCREAMING_SNAKE_CASE="__null__" , **SCREAMING_SNAKE_CASE , ) -> List[str]:
super().__init__(unk_token=SCREAMING_SNAKE_CASE , bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE)
with open(SCREAMING_SNAKE_CASE , encoding="""utf-8""") as vocab_handle:
_lowerCamelCase : List[str] = json.load(SCREAMING_SNAKE_CASE)
_lowerCamelCase : Tuple = {v: k for k, v in self.encoder.items()}
with open(SCREAMING_SNAKE_CASE , encoding="""utf-8""") as merges_handle:
_lowerCamelCase : Dict = merges_handle.read().split("""\n""")[1:-1]
_lowerCamelCase : Optional[int] = [tuple(merge.split()) for merge in merges]
_lowerCamelCase : int = dict(zip(SCREAMING_SNAKE_CASE , range(len(SCREAMING_SNAKE_CASE))))
_lowerCamelCase : Dict = {}
@property
def UpperCamelCase_ ( self) -> int:
return len(self.encoder)
def UpperCamelCase_ ( self) -> Dict:
return dict(self.encoder , **self.added_tokens_encoder)
    def bpe(self, token: str) -> str:
        """Apply lowercasing, punctuation splitting and greedy BPE merges to one whitespace-separated token."""
        if token in self.cache:
            return self.cache[token]
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")

        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue

            token = token.lower()
            word = tuple(token)
            # mark the final character with the end-of-word suffix
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)

            if not pairs:
                words.append(token)
                continue

            while True:
                # merge the lowest-ranked (most frequent) bigram first
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0

                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break

                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                word = tuple(new_word)
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            # drop the trailing "</w>" end-of-word marker
            word = word[:-4]

            self.cache[token] = word
            words.append(word)
        return " ".join(words)

    def _tokenize(self, text: str) -> List[str]:
        """Split a string into BPE subword tokens."""
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        """Convert a token (str) to an id using the vocab."""
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        """Convert an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Join subword tokens back into a single string, removing "@@ " continuation markers."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
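
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): a self-contained rerun
# of the greedy BPE merge loop used in `bpe` above, with a tiny hypothetical
# merge table. The local `get_pairs`/`toy_ranks` stand in for the tokenizer's
# real `get_pairs`/`self.bpe_ranks`, which are normally loaded from a merges file.
def _demo_bpe_merge_loop():
    toy_ranks = {("l", "o"): 0, ("lo", "w</w>"): 1}

    def get_pairs(word):
        return {(word[i], word[i + 1]) for i in range(len(word) - 1)}

    word = ("l", "o", "w</w>")
    pairs = get_pairs(word)
    while pairs:
        # pick the merge with the lowest rank, exactly as in `bpe` above
        bigram = min(pairs, key=lambda pair: toy_ranks.get(pair, float("inf")))
        if bigram not in toy_ranks:
            break
        first, second = bigram
        new_word, i = [], 0
        while i < len(word):
            if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                new_word.append(first + second)
                i += 2
            else:
                new_word.append(word[i])
                i += 1
        word = tuple(new_word)
        pairs = get_pairs(word)
    print(word)  # ('low</w>',): "l" + "o" merged first, then "lo" + "w</w>"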
# Function to print upper half of diamond (pyramid)
def floyd(n: int) -> None:
    """Print the upper half of the diamond: rows of 1..n stars, left-padded."""
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


# Function to print lower half of diamond (pyramid)
def reverse_floyd(n: int) -> None:
    """Print the lower half of the diamond: rows of n..1 stars, left-padded."""
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


# Function to print the complete diamond pattern
def pretty_print(n: int) -> None:
    """Print the full diamond, or a placeholder message if n is not positive."""
    if n <= 0:
        print(" ... .... nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half


if __name__ == "__main__":
    print(r"| /\ | |- | |- |--| |\ /| |-")
    print(r"|/ \| |- |_ |_ |__| | \/ | |_")
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))
    print("Good Bye...")
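
# Illustrative, non-interactive check (not in the original file): capture the
# diamond for a given n instead of reading it from stdin. For n = 2 the output
# has row widths 1, 2, 2, 1.
import io
from contextlib import redirect_stdout


def diamond_as_string(n: int) -> str:
    buf = io.StringIO()
    with redirect_stdout(buf):
        pretty_print(n)
    return buf.getvalue()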
"""Kohonen self-organizing map with two competing weight vectors."""
import math


class SelfOrganizingMap:
    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        """Compute the winning vector: the row of `weights` with the smaller
        squared Euclidean distance to `sample`."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow(sample[i] - weights[0][i], 2)
            d1 += math.pow(sample[i] - weights[1][i], 2)
        return 0 if d0 < d1 else 1

    def update(self, weights: list[list[float]], sample: list[int], j: int, alpha: float) -> list[list[float]]:
        """Move the winning vector `weights[j]` toward `sample` by learning rate `alpha`."""
        for i in range(len(sample)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    # Training examples (m, n)
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]

    # Weight initialization (n, C)
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]

    # Training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5

    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)
            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)

    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)

    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")


# running the main() function
if __name__ == "__main__":
    main()
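
# Worked example (illustrative, not in the original file): a single update
# step with alpha = 0.5 moves the winning row halfway toward the sample.
def _demo_update_step():
    som = SelfOrganizingMap()
    weights = [[0.2, 0.6], [0.8, 0.4]]
    print(som.update(weights, [1, 0], 0, 0.5))
    # -> approximately [[0.6, 0.3], [0.8, 0.4]], since
    #    0.2 + 0.5 * (1 - 0.2) = 0.6 and 0.6 + 0.5 * (0 - 0.6) = 0.3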
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self : int , __A : str , __A : Union[str, Any]=12 , __A : List[Any]=7 , __A : Tuple=True , __A : Any=True , __A : str=True , __A : Union[str, Any]=99 , __A : Union[str, Any]=32 , __A : Dict=32 , __A : List[Any]=2 , __A : Union[str, Any]=4 , __A : int=37 , __A : Tuple=0.1 , __A : Optional[Any]=0.1 , __A : str=512 , __A : Dict=0.0_2 , __A : str=0 , __A : Optional[int]=None , ) -> int:
'''simple docstring'''
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = seq_length
lowerCAmelCase__ = is_training
lowerCAmelCase__ = use_input_mask
lowerCAmelCase__ = use_labels
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = projection_dim
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = dropout
lowerCAmelCase__ = attention_dropout
lowerCAmelCase__ = max_position_embeddings
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = scope
lowerCAmelCase__ = bos_token_id
def lowercase__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ = None
if self.use_input_mask:
lowerCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
lowerCAmelCase__ = input_mask.numpy()
lowerCAmelCase__ ,lowerCAmelCase__ = input_mask.shape
lowerCAmelCase__ = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(__A ):
lowerCAmelCase__ = 1
lowerCAmelCase__ = 0
lowerCAmelCase__ = self.get_config()
return config, input_ids, tf.convert_to_tensor(__A )
def lowercase__ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def lowercase__ ( self : List[str] , __A : str , __A : Tuple , __A : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase__ = TFBlipTextModel(config=__A )
lowerCAmelCase__ = model(__A , attention_mask=__A , training=__A )
lowerCAmelCase__ = model(__A , training=__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowercase__ ( self : int ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = self.prepare_config_and_inputs()
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ = config_and_inputs
lowerCAmelCase__ = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class lowerCamelCase__ ( _A, unittest.TestCase ):
'''simple docstring'''
A__ = (TFBlipTextModel,) if is_tf_available() else ()
A__ = False
A__ = False
A__ = False
def lowercase__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
lowerCAmelCase__ = BlipTextModelTester(self )
lowerCAmelCase__ = ConfigTester(self , config_class=__A , hidden_size=37 )
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase__ ( self : List[str] ) -> int:
'''simple docstring'''
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def lowercase__ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
pass
def lowercase__ ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="""Blip does not use inputs_embeds""" )
def lowercase__ ( self : Any ) -> str:
'''simple docstring'''
pass
@unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" )
def lowercase__ ( self : str ) -> Dict:
'''simple docstring'''
pass
@unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" )
def lowercase__ ( self : Optional[int] ) -> str:
'''simple docstring'''
pass
@slow
def lowercase__ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ = TFBlipTextModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def lowercase__ ( self : Union[str, Any] , __A : Optional[Any]=True ) -> Union[str, Any]:
'''simple docstring'''
super().test_pt_tf_model_equivalence(allow_missing_keys=__A )
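
# Illustrative standalone sketch (not part of the test file; assumes TensorFlow
# and transformers are installed): build a tiny TFBlipTextModel and run one
# forward pass, mirroring what the model-creation check above exercises. The
# config values match the tester defaults used in this file.
def _demo_tiny_blip_text():
    config = BlipTextConfig(
        vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37
    )
    model = TFBlipTextModel(config=config)
    input_ids = tf.constant([[1, 5, 7, 9, 2]])
    outputs = model(input_ids, training=False)
    print(outputs.last_hidden_state.shape)  # (1, 5, 32)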
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__lowercase = logging.get_logger(__name__)
__lowercase = '''▁'''
__lowercase = {'''vocab_file''': '''sentencepiece.bpe.model''', '''monolingual_vocab_file''': '''dict.txt'''}
__lowercase = {
'''vocab_file''': {
'''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model''',
},
'''monolingual_vocab_file''': {
'''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt''',
},
}
__lowercase = {'''vinai/bartpho-syllable''': 1024}
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
a__ : Union[str, Any] = VOCAB_FILES_NAMES
a__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
a__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ : Optional[int] = ["""input_ids""", """attention_mask"""]
def __init__( self , __lowercase , __lowercase , __lowercase="<s>" , __lowercase="</s>" , __lowercase="</s>" , __lowercase="<s>" , __lowercase="<unk>" , __lowercase="<pad>" , __lowercase="<mask>" , __lowercase = None , **__lowercase , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
__UpperCamelCase :int = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase) if isinstance(__lowercase , __lowercase) else mask_token
__UpperCamelCase :Tuple = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__lowercase , eos_token=__lowercase , unk_token=__lowercase , sep_token=__lowercase , cls_token=__lowercase , pad_token=__lowercase , mask_token=__lowercase , sp_model_kwargs=self.sp_model_kwargs , **__lowercase , )
__UpperCamelCase :List[Any] = vocab_file
__UpperCamelCase :Optional[int] = monolingual_vocab_file
__UpperCamelCase :List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(__lowercase))
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
__UpperCamelCase :Dict = {}
__UpperCamelCase :Dict = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(__lowercase) not in self.fairseq_tokens_to_ids:
__UpperCamelCase :str = cnt
cnt += 1
with open(__lowercase , '''r''' , encoding='''utf-8''') as f:
for line in f.readlines():
__UpperCamelCase :Optional[Any] = line.strip().split()[0]
__UpperCamelCase :str = len(self.fairseq_tokens_to_ids)
if str(__lowercase) not in self.fairseq_tokens_to_ids:
__UpperCamelCase :str = len(self.fairseq_tokens_to_ids)
__UpperCamelCase :int = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self) -> Dict:
__UpperCamelCase :Union[str, Any] = self.__dict__.copy()
__UpperCamelCase :Optional[int] = None
__UpperCamelCase :Optional[Any] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , __lowercase) -> Tuple:
__UpperCamelCase :Optional[int] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs'''):
__UpperCamelCase :Any = {}
__UpperCamelCase :Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
def UpperCamelCase__ ( self , __lowercase , __lowercase = None) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__UpperCamelCase :Optional[int] = [self.cls_token_id]
__UpperCamelCase :str = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCamelCase__ ( self , __lowercase , __lowercase = None , __lowercase = False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowercase , token_ids_a=__lowercase , already_has_special_tokens=__lowercase)
if token_ids_a is None:
return [1] + ([0] * len(__lowercase)) + [1]
return [1] + ([0] * len(__lowercase)) + [1, 1] + ([0] * len(__lowercase)) + [1]
def UpperCamelCase__ ( self , __lowercase , __lowercase = None) -> List[int]:
__UpperCamelCase :int = [self.sep_token_id]
__UpperCamelCase :Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
@property
def UpperCamelCase__ ( self) -> str:
return len(self.fairseq_ids_to_tokens)
def UpperCamelCase__ ( self) -> Any:
__UpperCamelCase :int = {self.convert_ids_to_tokens(__lowercase): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def UpperCamelCase__ ( self , __lowercase) -> List[str]:
return self.sp_model.encode(__lowercase , out_type=__lowercase)
def UpperCamelCase__ ( self , __lowercase) -> Union[str, Any]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def UpperCamelCase__ ( self , __lowercase) -> str:
return self.fairseq_ids_to_tokens[index]
def UpperCamelCase__ ( self , __lowercase) -> Union[str, Any]:
__UpperCamelCase :int = ''''''.join(__lowercase).replace(__lowercase , ''' ''').strip()
return out_string
def UpperCamelCase__ ( self , __lowercase , __lowercase = None) -> Tuple[str]:
if not os.path.isdir(__lowercase):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
return
__UpperCamelCase :int = os.path.join(
__lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
__UpperCamelCase :str = os.path.join(
__lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , )
if os.path.abspath(self.vocab_file) != os.path.abspath(__lowercase) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , __lowercase)
elif not os.path.isfile(self.vocab_file):
with open(__lowercase , '''wb''') as fi:
__UpperCamelCase :str = self.sp_model.serialized_model_proto()
fi.write(__lowercase)
if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
__lowercase) and os.path.isfile(self.monolingual_vocab_file):
copyfile(self.monolingual_vocab_file , __lowercase)
elif not os.path.isfile(self.monolingual_vocab_file):
with open(__lowercase , '''w''' , encoding='''utf-8''') as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f"""{str(__lowercase)} \n""")
return out_vocab_file, out_monolingual_vocab_file
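
# Illustrative usage (not part of the original file; assumes sentencepiece and
# network access to the Hub): the tokenizer maps SentencePiece pieces through
# the reduced fairseq vocabulary built in __init__ above.
def _demo_bartpho_roundtrip():
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("vinai/bartpho-syllable")
    ids = tokenizer("Chúng tôi là những nghiên cứu viên.")["input_ids"]
    print(tokenizer.convert_ids_to_tokens(ids))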
def perfect(number: int) -> bool:
    """Return True if `number` is perfect, i.e. equal to the sum of its
    proper divisors (e.g. 6 = 1 + 2 + 3)."""
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number


if __name__ == "__main__":
    print("Program to check whether a number is a Perfect number or not...")
    number = int(input("Enter number: ").strip())
    print(f"{number} is {'' if perfect(number) else 'not '}a Perfect Number.")
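
# Quick sanity check (illustrative, not in the original file): the perfect
# numbers below 500.
def _demo_perfect():
    print([n for n in range(2, 500) if perfect(n)])  # [6, 28, 496]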
"""simple docstring"""
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
A : str = threading.Lock()
A : Optional[logging.Handler] = None
A : Tuple = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL,
}
A : List[str] = logging.WARNING
A : Tuple = True
def snake_case__ ( ):
"""simple docstring"""
UpperCamelCase__ = os.getenv("TRANSFORMERS_VERBOSITY" , _snake_case )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
F'Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, '
F'has to be one of: { ", ".join(log_levels.keys() ) }' )
return _default_log_level
def snake_case__ ( ):
"""simple docstring"""
return __name__.split("." )[0]
def snake_case__ ( ):
"""simple docstring"""
return logging.getLogger(_get_library_name() )
def snake_case__ ( ):
"""simple docstring"""
global _default_handler
with _lock:
if _default_handler:
# This library has already configured the library root logger.
return
UpperCamelCase__ = logging.StreamHandler() # Set sys.stderr as stream.
UpperCamelCase__ = sys.stderr.flush
# Apply our default configuration to the library root logger.
UpperCamelCase__ = _get_library_root_logger()
library_root_logger.addHandler(_default_handler )
library_root_logger.setLevel(_get_default_logging_level() )
UpperCamelCase__ = False
def snake_case__ ( ):
"""simple docstring"""
global _default_handler
with _lock:
if not _default_handler:
return
UpperCamelCase__ = _get_library_root_logger()
library_root_logger.removeHandler(_default_handler )
library_root_logger.setLevel(logging.NOTSET )
UpperCamelCase__ = None
def snake_case__ ( ):
"""simple docstring"""
return log_levels
def snake_case__ ( _snake_case : Optional[str] = None ):
"""simple docstring"""
if name is None:
UpperCamelCase__ = _get_library_name()
_configure_library_root_logger()
return logging.getLogger(_snake_case )
def snake_case__ ( ):
"""simple docstring"""
_configure_library_root_logger()
return _get_library_root_logger().getEffectiveLevel()
def snake_case__ ( _snake_case : int ):
"""simple docstring"""
_configure_library_root_logger()
_get_library_root_logger().setLevel(_snake_case )
def snake_case__ ( ):
"""simple docstring"""
return set_verbosity(_snake_case )
def snake_case__ ( ):
"""simple docstring"""
return set_verbosity(_snake_case )
def snake_case__ ( ):
"""simple docstring"""
return set_verbosity(_snake_case )
def snake_case__ ( ):
"""simple docstring"""
return set_verbosity(_snake_case )
def snake_case__ ( ):
"""simple docstring"""
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().removeHandler(_default_handler )
def snake_case__ ( ):
"""simple docstring"""
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().addHandler(_default_handler )
def snake_case__ ( _snake_case : logging.Handler ):
"""simple docstring"""
_configure_library_root_logger()
assert handler is not None
_get_library_root_logger().addHandler(_snake_case )
def snake_case__ ( _snake_case : logging.Handler ):
"""simple docstring"""
_configure_library_root_logger()
assert handler is not None and handler not in _get_library_root_logger().handlers
_get_library_root_logger().removeHandler(_snake_case )
def snake_case__ ( ):
"""simple docstring"""
_configure_library_root_logger()
UpperCamelCase__ = False
def snake_case__ ( ):
"""simple docstring"""
_configure_library_root_logger()
UpperCamelCase__ = True
def snake_case__ ( ):
"""simple docstring"""
UpperCamelCase__ = _get_library_root_logger().handlers
for handler in handlers:
UpperCamelCase__ = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s" )
handler.setFormatter(_snake_case )
def snake_case__ ( ):
"""simple docstring"""
UpperCamelCase__ = _get_library_root_logger().handlers
for handler in handlers:
handler.setFormatter(_snake_case )
def snake_case__ ( self : Union[str, Any] , *_snake_case : int , **_snake_case : Any ):
"""simple docstring"""
UpperCamelCase__ = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS" , _snake_case )
if no_advisory_warnings:
return
self.warning(*_snake_case , **_snake_case )
A : List[str] = warning_advice
@functools.lru_cache(_snake_case )
def snake_case__ ( self : int , *_snake_case : List[str] , **_snake_case : Dict ):
"""simple docstring"""
self.warning(*_snake_case , **_snake_case )
A : Any = warning_once
class lowerCAmelCase :
'''simple docstring'''
def __init__( self :Union[str, Any] , *lowerCamelCase_ :Dict , **lowerCamelCase_ :Any ) -> Any: # pylint: disable=unused-argument
"""simple docstring"""
UpperCamelCase__ = args[0] if args else None
def __iter__( self :List[str] ) -> str:
"""simple docstring"""
return iter(self._iterator )
def __getattr__( self :Optional[Any] , lowerCamelCase_ :List[Any] ) -> Tuple:
"""simple docstring"""
def empty_fn(*lowerCamelCase_ :int , **lowerCamelCase_ :Optional[Any] ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self :List[Any] ) -> Tuple:
"""simple docstring"""
return self
def __exit__( self :int , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :List[str] , lowerCamelCase_ :Optional[Any] ) -> Any:
"""simple docstring"""
return
class lowerCAmelCase :
'''simple docstring'''
def __call__( self :int , *lowerCamelCase_ :Union[str, Any] , **lowerCamelCase_ :Tuple ) -> List[Any]:
"""simple docstring"""
if _tqdm_active:
return tqdm_lib.tqdm(*lowerCamelCase_ , **lowerCamelCase_ )
else:
return EmptyTqdm(*lowerCamelCase_ , **lowerCamelCase_ )
def lowerCamelCase__ ( self :Any , *lowerCamelCase_ :Any , **lowerCamelCase_ :int ) -> List[Any]:
"""simple docstring"""
UpperCamelCase__ = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*lowerCamelCase_ , **lowerCamelCase_ )
def lowerCamelCase__ ( self :Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
A : Dict = _tqdm_cls()
def snake_case__ ( ):
"""simple docstring"""
global _tqdm_active
return bool(_tqdm_active )
def snake_case__ ( ):
"""simple docstring"""
global _tqdm_active
UpperCamelCase__ = True
hf_hub_utils.enable_progress_bars()
def snake_case__ ( ):
"""simple docstring"""
global _tqdm_active
UpperCamelCase__ = False
hf_hub_utils.disable_progress_bars()
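
# Illustrative usage (not part of the original file): in the published
# transformers API the wrappers defined above are exposed as get_logger,
# set_verbosity_* and the progress-bar toggles.
def _demo_logging_usage():
    import transformers

    transformers.logging.set_verbosity_info()
    logger = transformers.logging.get_logger("transformers")
    logger.info("now visible at INFO verbosity")
    logger.warning_advice("suppressed when TRANSFORMERS_NO_ADVISORY_WARNINGS is set")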
"""simple docstring"""
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
A : List[str] = logging.getLogger(__name__)
A : Optional[int] = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
A : int = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class lowerCAmelCase :
'''simple docstring'''
A = field(
default=snake_case__ , metadata={
'help': (
'The model checkpoint for weights initialization. Leave None if you want to train a model from'
' scratch.'
)
} , )
A = field(
default=snake_case__ , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(snake_case__ )} , )
A = field(
default=snake_case__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
A = field(
default=snake_case__ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
A = field(
default=snake_case__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
@dataclass
class lowerCAmelCase :
'''simple docstring'''
A = field(
default=snake_case__ , metadata={'help': 'The input training data file (a text file).'} )
A = field(
default=snake_case__ , metadata={
'help': (
'The input training data files (multiple files in glob format). '
'Very often splitting large files to smaller files can prevent tokenizer going out of memory'
)
} , )
A = field(
default=snake_case__ , metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , )
A = field(
default=snake_case__ , metadata={'help': 'An optional input train ref data file for whole word mask in Chinese.'} , )
A = field(
default=snake_case__ , metadata={'help': 'An optional input eval ref data file for whole word mask in Chinese.'} , )
A = field(
default=snake_case__ , metadata={'help': 'Whether distinct lines of text in the dataset are to be handled as distinct sequences.'} , )
A = field(
default=snake_case__ , metadata={'help': 'Train with masked-language modeling loss instead of language modeling.'} )
A = field(default=snake_case__ , metadata={'help': 'Whether ot not to use whole word mask.'} )
A = field(
default=0.15 , metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'} )
A = field(
default=1 / 6 , metadata={
'help': (
'Ratio of length of a span of masked tokens to surrounding context length for permutation language'
' modeling.'
)
} , )
A = field(
default=5 , metadata={'help': 'Maximum length of a span of masked tokens for permutation language modeling.'} )
A = field(
default=-1 , metadata={
'help': (
'Optional input sequence length after tokenization.'
'The training dataset will be truncated in block of this size for training.'
'Default to the model max input length for single sentence inputs (take into account special tokens).'
)
} , )
A = field(
default=snake_case__ , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def snake_case__ ( _snake_case : DataTrainingArguments , _snake_case : PreTrainedTokenizer , _snake_case : bool = False , _snake_case : Optional[str] = None , ):
"""simple docstring"""
def _dataset(_snake_case : str , _snake_case : int=None ):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
raise ValueError("You need to set world whole masking and mlm to True for Chinese Whole Word Mask" )
return LineByLineWithRefDataset(
tokenizer=_snake_case , file_path=_snake_case , block_size=args.block_size , ref_path=_snake_case , )
return LineByLineTextDataset(tokenizer=_snake_case , file_path=_snake_case , block_size=args.block_size )
else:
return TextDataset(
tokenizer=_snake_case , file_path=_snake_case , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=_snake_case , )
if evaluate:
return _dataset(args.eval_data_file , args.eval_ref_file )
elif args.train_data_files:
return ConcatDataset([_dataset(_snake_case ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file , args.train_ref_file )
def snake_case__ ( ):
"""simple docstring"""
UpperCamelCase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
"Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
"or remove the --do_eval argument." )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s" , _snake_case )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
UpperCamelCase__ = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
UpperCamelCase__ = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
UpperCamelCase__ = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch." )
if model_args.tokenizer_name:
UpperCamelCase__ = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
UpperCamelCase__ = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"
" script, save it,and load it from here, using --tokenizer_name" )
if model_args.model_name_or_path:
UpperCamelCase__ = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=_snake_case , cache_dir=model_args.cache_dir , )
else:
logger.info("Training new model from scratch" )
UpperCamelCase__ = AutoModelWithLMHead.from_config(_snake_case )
model.resize_token_embeddings(len(_snake_case ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
"BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"
"--mlm flag (masked language modeling)." )
if data_args.block_size <= 0:
UpperCamelCase__ = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
UpperCamelCase__ = min(data_args.block_size , tokenizer.max_len )
# Get datasets
UpperCamelCase__ = (
get_dataset(_snake_case , tokenizer=_snake_case , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
UpperCamelCase__ = (
get_dataset(_snake_case , tokenizer=_snake_case , evaluate=_snake_case , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
UpperCamelCase__ = DataCollatorForPermutationLanguageModeling(
tokenizer=_snake_case , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
UpperCamelCase__ = DataCollatorForWholeWordMask(
tokenizer=_snake_case , mlm_probability=data_args.mlm_probability )
else:
UpperCamelCase__ = DataCollatorForLanguageModeling(
tokenizer=_snake_case , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
UpperCamelCase__ = Trainer(
model=_snake_case , args=_snake_case , data_collator=_snake_case , train_dataset=_snake_case , eval_dataset=_snake_case , prediction_loss_only=_snake_case , )
# Training
if training_args.do_train:
UpperCamelCase__ = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=_snake_case )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
UpperCamelCase__ = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
UpperCamelCase__ = trainer.evaluate()
UpperCamelCase__ = math.exp(eval_output["eval_loss"] )
UpperCamelCase__ = {"perplexity": perplexity}
UpperCamelCase__ = os.path.join(training_args.output_dir , "eval_results_lm.txt" )
if trainer.is_world_master():
with open(_snake_case , "w" ) as writer:
logger.info("***** Eval results *****" )
for key in sorted(result.keys() ):
logger.info(" %s = %s" , _snake_case , str(result[key] ) )
writer.write("%s = %s\n" % (key, str(result[key] )) )
results.update(_snake_case )
return results
def snake_case__ ( _snake_case : List[str] ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
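
# Example invocation (illustrative; the script file name is assumed, and the
# flags map onto the ModelArguments, DataTrainingArguments and
# TrainingArguments dataclasses above):
#
#   python run_language_modeling.py \
#       --model_name_or_path gpt2 \
#       --train_data_file train.txt \
#       --do_train \
#       --output_dir ./lm-output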
'''simple docstring'''
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
lowercase__ : Tuple = logging.get_logger(__name__)
def a__ ( ) -> str:
"""simple docstring"""
_UpperCamelCase = os.getenv('''SM_HP_MP_PARAMETERS''', '''{}''' )
try:
# Parse it and check the field "partitions" is included, it is required for model parallel.
_UpperCamelCase = json.loads(lowercase )
if "partitions" not in smp_options:
return False
except json.JSONDecodeError:
return False
# Get the sagemaker specific framework parameters from mpi_options variable.
_UpperCamelCase = os.getenv('''SM_FRAMEWORK_PARAMS''', '''{}''' )
try:
# Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
_UpperCamelCase = json.loads(lowercase )
if not mpi_options.get('''sagemaker_mpi_enabled''', lowercase ):
return False
except json.JSONDecodeError:
return False
# Lastly, check if the `smdistributed` module is present.
return importlib.util.find_spec('''smdistributed''' ) is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
_snake_case : str = field(
default='' , metadata={'help': 'Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer'} , )
def snake_case__ ( self : List[str] ) -> Any:
'''simple docstring'''
super().__post_init__()
warnings.warn(
'''`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use '''
'''`TrainingArguments` instead.''' , lowerCAmelCase__ , )
@cached_property
def snake_case__ ( self : Any ) -> "torch.device":
'''simple docstring'''
logger.info('''PyTorch: setting up devices''' )
if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
logger.warning(
'''torch.distributed process group is initialized, but local_rank == -1. '''
'''In order to use Torch DDP, launch your script with `python -m torch.distributed.launch''' )
if self.no_cuda:
_UpperCamelCase = torch.device('''cpu''' )
_UpperCamelCase = 0
elif is_sagemaker_model_parallel_available():
_UpperCamelCase = smp.local_rank()
_UpperCamelCase = torch.device('''cuda''' , lowerCAmelCase__ )
_UpperCamelCase = 1
elif is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.torch_smddp # noqa: F401
torch.distributed.init_process_group(backend='''smddp''' , timeout=self.ddp_timeout_delta )
_UpperCamelCase = int(os.getenv('''SMDATAPARALLEL_LOCAL_RANK''' ) )
_UpperCamelCase = torch.device('''cuda''' , self.local_rank )
_UpperCamelCase = 1
elif self.local_rank == -1:
# if n_gpu is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
_UpperCamelCase = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' )
# Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
# the default value.
_UpperCamelCase = torch.cuda.device_count()
else:
# Here, we'll use torch.distributed.
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend='''nccl''' , timeout=self.ddp_timeout_delta )
_UpperCamelCase = torch.device('''cuda''' , self.local_rank )
_UpperCamelCase = 1
if device.type == "cuda":
torch.cuda.set_device(lowerCAmelCase__ )
return device
@property
def snake_case__ ( self : str ) -> Tuple:
'''simple docstring'''
if is_sagemaker_model_parallel_available():
return smp.dp_size()
return super().world_size
@property
def snake_case__ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
return not is_sagemaker_model_parallel_available()
@property
def snake_case__ ( self : int ) -> List[Any]:
'''simple docstring'''
return False
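
# Illustrative usage (hypothetical training script, not part of the original
# file): the deprecated arguments class above is a drop-in replacement for
# TrainingArguments when launching through SageMaker.
#
#   args = SageMakerTrainingArguments(output_dir="/opt/ml/model")
#   trainer = Trainer(model=model, args=args, train_dataset=train_dataset)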
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}


class GPTNeoXJapaneseConfig(PretrainedConfig):
    model_type = "gpt_neox_japanese"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10000,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31996,
        eos_token_id=31999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
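
# Illustrative usage (not in the original file): the defaults above reproduce
# the abeja/gpt-neox-japanese-2.7b architecture.
def _demo_gpt_neox_japanese_config():
    config = GPTNeoXJapaneseConfig()
    print(config.hidden_size, config.num_hidden_layers)  # 2560 32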
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def A__ ( UpperCamelCase ):
A = 384
A = 7
if "tiny" in model_name:
A = 96
A = (2, 2, 6, 2)
A = (3, 6, 12, 24)
elif "small" in model_name:
A = 96
A = (2, 2, 18, 2)
A = (3, 6, 12, 24)
elif "base" in model_name:
A = 128
A = (2, 2, 18, 2)
A = (4, 8, 16, 32)
A = 12
A = 512
elif "large" in model_name:
A = 192
A = (2, 2, 18, 2)
A = (6, 12, 24, 48)
A = 12
A = 768
# set label information
A = 150
A = "huggingface/label-files"
A = "ade20k-id2label.json"
A = json.load(open(hf_hub_download(UpperCamelCase , UpperCamelCase , repo_type="dataset" ) , "r" ) )
A = {int(UpperCamelCase ): v for k, v in idalabel.items()}
A = {v: k for k, v in idalabel.items()}
A = SwinConfig(
embed_dim=UpperCamelCase , depths=UpperCamelCase , num_heads=UpperCamelCase , window_size=UpperCamelCase , out_features=["stage1", "stage2", "stage3", "stage4"] , )
A = UperNetConfig(
backbone_config=UpperCamelCase , auxiliary_in_channels=UpperCamelCase , num_labels=UpperCamelCase , idalabel=UpperCamelCase , labelaid=UpperCamelCase , )
return config
def A__ ( UpperCamelCase ):
A = []
# fmt: off
# stem
rename_keys.append(("backbone.patch_embed.projection.weight", "backbone.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.projection.bias", "backbone.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "backbone.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "backbone.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.norm1.weight", F"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") )
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.norm1.bias", F"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") )
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table", F"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") )
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index", F"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") )
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight", F"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") )
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias", F"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") )
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.norm2.weight", F"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") )
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.norm2.bias", F"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") )
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight", F"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") )
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias", F"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") )
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight", F"backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight") )
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias", F"backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias") )
if i < 3:
rename_keys.append((F"backbone.stages.{i}.downsample.reduction.weight", F"backbone.encoder.layers.{i}.downsample.reduction.weight") )
rename_keys.append((F"backbone.stages.{i}.downsample.norm.weight", F"backbone.encoder.layers.{i}.downsample.norm.weight") )
rename_keys.append((F"backbone.stages.{i}.downsample.norm.bias", F"backbone.encoder.layers.{i}.downsample.norm.bias") )
rename_keys.append((F"backbone.norm{i}.weight", F"backbone.hidden_states_norms.stage{i+1}.weight") )
rename_keys.append((F"backbone.norm{i}.bias", F"backbone.hidden_states_norms.stage{i+1}.bias") )
# decode head
rename_keys.extend(
[
("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
] )
# fmt: on
return rename_keys
def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase ):
A = dct.pop(UpperCamelCase )
A = val
def A__ ( UpperCamelCase , UpperCamelCase ):
A = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
A = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
A = state_dict.pop(F"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight" )
A = state_dict.pop(F"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
A = in_proj_weight[:dim, :]
A = in_proj_bias[: dim]
A = in_proj_weight[
dim : dim * 2, :
]
A = in_proj_bias[
dim : dim * 2
]
A = in_proj_weight[
-dim :, :
]
A = in_proj_bias[-dim :]
# fmt: on
def A__ ( UpperCamelCase ):
A, A = x.shape
A = x.reshape(UpperCamelCase , 4 , in_channel // 4 )
A = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(UpperCamelCase , UpperCamelCase )
return x
def A__ ( UpperCamelCase ):
A, A = x.shape
A = x.reshape(UpperCamelCase , in_channel // 4 , 4 )
A = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(UpperCamelCase , UpperCamelCase )
return x
def A__ ( UpperCamelCase ):
A = x.shape[0]
A = x.reshape(4 , in_channel // 4 )
A = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(UpperCamelCase )
return x
def A__ ( UpperCamelCase ):
A = x.shape[0]
A = x.reshape(in_channel // 4 , 4 )
A = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(UpperCamelCase )
return x
def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase ):
A = {
"upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth",
"upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth",
"upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth",
"upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth",
}
A = model_name_to_url[model_name]
A = torch.hub.load_state_dict_from_url(UpperCamelCase , map_location="cpu" , file_name=UpperCamelCase )[
"state_dict"
]
for name, param in state_dict.items():
print(UpperCamelCase , param.shape )
A = get_upernet_config(UpperCamelCase )
A = UperNetForSemanticSegmentation(UpperCamelCase )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
A = state_dict.pop(UpperCamelCase )
if "bn" in key:
A = key.replace("bn" , "batch_norm" )
A = val
# rename keys
A = create_rename_keys(UpperCamelCase )
for src, dest in rename_keys:
rename_key(UpperCamelCase , UpperCamelCase , UpperCamelCase )
read_in_q_k_v(UpperCamelCase , config.backbone_config )
# fix downsample parameters
for key, value in state_dict.items():
if "downsample" in key:
if "reduction" in key:
A = reverse_correct_unfold_reduction_order(UpperCamelCase )
if "norm" in key:
A = reverse_correct_unfold_norm_order(UpperCamelCase )
model.load_state_dict(UpperCamelCase )
# verify on image
A = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
A = Image.open(requests.get(UpperCamelCase , stream=UpperCamelCase ).raw ).convert("RGB" )
A = SegformerImageProcessor()
A = processor(UpperCamelCase , return_tensors="pt" ).pixel_values
with torch.no_grad():
A = model(UpperCamelCase )
A = outputs.logits
print(logits.shape )
print("First values of logits:" , logits[0, 0, :3, :3] )
# assert values
if model_name == "upernet-swin-tiny":
A = torch.tensor(
[[-7.59_58, -7.59_58, -7.43_02], [-7.59_58, -7.59_58, -7.43_02], [-7.47_97, -7.47_97, -7.30_68]] )
elif model_name == "upernet-swin-small":
A = torch.tensor(
[[-7.19_21, -7.19_21, -6.95_32], [-7.19_21, -7.19_21, -6.95_32], [-7.09_08, -7.09_08, -6.85_34]] )
elif model_name == "upernet-swin-base":
A = torch.tensor(
[[-6.58_51, -6.58_51, -6.43_30], [-6.58_51, -6.58_51, -6.43_30], [-6.47_63, -6.47_63, -6.32_54]] )
elif model_name == "upernet-swin-large":
A = torch.tensor(
[[-7.52_97, -7.52_97, -7.38_02], [-7.52_97, -7.52_97, -7.38_02], [-7.40_44, -7.40_44, -7.25_86]] )
print("Logits:" , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , UpperCamelCase , atol=1E-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(UpperCamelCase )
print(F"Saving processor to {pytorch_dump_folder_path}" )
processor.save_pretrained(UpperCamelCase )
if push_to_hub:
print(F"Pushing model and processor for {model_name} to hub" )
model.push_to_hub(F"openmmlab/{model_name}" )
processor.push_to_hub(F"openmmlab/{model_name}" )
if __name__ == "__main__":
_snake_case : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-swin-tiny',
type=str,
choices=[F"""upernet-swin-{size}""" for size in ['tiny', 'small', 'base', 'large']],
help='Name of the Swin + UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
_snake_case : str = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
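
# Example invocation (illustrative; the script file name is assumed):
#
#   python convert_upernet_swin_to_pytorch.py \
#       --model_name upernet-swin-tiny \
#       --pytorch_dump_folder_path ./upernet-swin-tiny \
#       --push_to_hub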
"""simple docstring"""
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
_snake_case : List[str] = Lock()
def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ):
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 10 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(UpperCamelCase )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
A = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
A = min(UpperCamelCase , UpperCamelCase )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(UpperCamelCase )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
A = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
A = max(UpperCamelCase , UpperCamelCase )
# after all swaps are performed, send the values back to main
result_pipe[1].send(UpperCamelCase )
def A__ ( UpperCamelCase ):
A = []
A = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
A = Pipe()
A = Pipe()
process_array_.append(
Process(
target=UpperCamelCase , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
A = temp_rs
A = temp_rr
for i in range(1 , len(UpperCamelCase ) - 1 ):
A = Pipe()
A = Pipe()
process_array_.append(
Process(
target=UpperCamelCase , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
A = temp_rs
A = temp_rr
process_array_.append(
Process(
target=UpperCamelCase , args=(
len(UpperCamelCase ) - 1,
arr[len(UpperCamelCase ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(UpperCamelCase ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(UpperCamelCase ) ):
A = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def A__ ( ):
A = list(range(10 , 0 , -1 ) )
print("Initial List" )
print(*UpperCamelCase )
A = odd_even_transposition(UpperCamelCase )
print("Sorted List\n" )
print(*UpperCamelCase )
if __name__ == "__main__":
main()
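
# Quick check (illustrative). Note that oe_process above hard-codes 10
# exchange rounds, so the network fully sorts lists of up to 10 elements:
#
#   odd_even_transposition([3, 1, 2, 5, 4, 9, 7, 8, 6, 0])
#     -> [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]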
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = filter(lambda _lowercase : p.requires_grad , model.parameters() )
UpperCAmelCase_ : str = sum([np.prod(p.size() ) for p in model_parameters] )
return params
__a = logging.getLogger(__name__)
def lowerCamelCase__ ( _lowercase , _lowercase ):
'''simple docstring'''
if metric == "rouge2":
UpperCAmelCase_ : List[Any] = '''{val_avg_rouge2:.4f}-{step_count}'''
elif metric == "bleu":
UpperCAmelCase_ : int = '''{val_avg_bleu:.4f}-{step_count}'''
elif metric == "em":
UpperCAmelCase_ : Any = '''{val_avg_em:.4f}-{step_count}'''
else:
raise NotImplementedError(
f'''seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this'''
''' function.''' )
UpperCAmelCase_ : List[Any] = ModelCheckpoint(
dirpath=_lowercase , filename=_lowercase , monitor=f'''val_{metric}''' , mode='''max''' , save_top_k=3 , every_n_epochs=1 , )
return checkpoint_callback
def lowerCamelCase__ ( _lowercase , _lowercase ):
'''simple docstring'''
return EarlyStopping(
monitor=f'''val_{metric}''' , mode='''min''' if '''loss''' in metric else '''max''' , patience=_lowercase , verbose=_lowercase , )
class __a( pl.Callback ):
"""simple docstring"""
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Tuple:
UpperCAmelCase_ : Tuple = {f'''lr_group_{i}''': param['''lr'''] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(_SCREAMING_SNAKE_CASE )
@rank_zero_only
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=True ) -> None:
logger.info(f'''***** {type_path} results at step {trainer.global_step:05d} *****''' )
UpperCAmelCase_ : List[Any] = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['''log''', '''progress_bar''', '''preds''']} )
# Log results
UpperCAmelCase_ : str = Path(pl_module.hparams.output_dir )
if type_path == "test":
UpperCAmelCase_ : Dict = od / '''test_results.txt'''
UpperCAmelCase_ : Union[str, Any] = od / '''test_generations.txt'''
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
UpperCAmelCase_ : Tuple = od / f'''{type_path}_results/{trainer.global_step:05d}.txt'''
UpperCAmelCase_ : int = od / f'''{type_path}_generations/{trainer.global_step:05d}.txt'''
results_file.parent.mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
generations_file.parent.mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
with open(_SCREAMING_SNAKE_CASE ,'''a+''' ) as writer:
for key in sorted(_SCREAMING_SNAKE_CASE ):
if key in ["log", "progress_bar", "preds"]:
continue
UpperCAmelCase_ : Dict = metrics[key]
if isinstance(_SCREAMING_SNAKE_CASE ,torch.Tensor ):
UpperCAmelCase_ : Any = val.item()
UpperCAmelCase_ : Any = f'''{key}: {val:.6f}\n'''
writer.write(_SCREAMING_SNAKE_CASE )
if not save_generations:
return
if "preds" in metrics:
UpperCAmelCase_ : Union[str, Any] = '''\n'''.join(metrics['''preds'''] )
generations_file.open('''w+''' ).write(_SCREAMING_SNAKE_CASE )
@rank_zero_only
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Optional[int]:
try:
UpperCAmelCase_ : Optional[Any] = pl_module.model.model.num_parameters()
except AttributeError:
UpperCAmelCase_ : Tuple = pl_module.model.num_parameters()
UpperCAmelCase_ : Optional[int] = count_trainable_parameters(_SCREAMING_SNAKE_CASE )
# mp stands for million parameters
trainer.logger.log_metrics({'''n_params''': npars, '''mp''': npars / 1e6, '''grad_mp''': n_trainable_pars / 1e6} )
@rank_zero_only
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
save_json(pl_module.metrics ,pl_module.metrics_save_path )
return self._write_logs(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,'''test''' )
@rank_zero_only
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
save_json(pl_module.metrics ,pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
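
# Illustrative wiring (not in the original file; in the source codebase the
# pieces above are Seq2SeqLoggingCallback, get_checkpoint_callback and
# get_early_stopping_callback). A fine-tuning script would pass them to the
# Lightning trainer:
#
#   trainer = pl.Trainer(
#       callbacks=[
#           Seq2SeqLoggingCallback(),
#           get_checkpoint_callback(output_dir, "rouge2"),
#           get_early_stopping_callback("rouge2", patience=3),
#       ],
#   )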
from ...configuration_utils import PretrainedConfig
class BertGenerationConfig(PretrainedConfig):
    model_type = "bert-generation"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=1,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
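

# Hedged usage sketch (added for illustration, not part of the original
# module): build a small config and check that a field survives the dict
# round-trip that PretrainedConfig provides via to_dict().
def _example_bert_generation_config():
    config = BertGenerationConfig(hidden_size=256, num_hidden_layers=4, num_attention_heads=4)
    assert config.to_dict()["hidden_size"] == 256
    return config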
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
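

# Hedged usage sketch (illustration only): the usual entry point for a
# pipeline like this is DiffusionPipeline.from_pretrained; the checkpoint id
# below is an assumption picked for illustration, not taken from this file.
#
#     pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#     image = pipe(num_inference_steps=50).images[0]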
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_torch
class ZeroShotObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = pipeline(
            'zero-shot-object-detection', model='hf-internal-testing/tiny-random-owlvit-object-detection'
        )

        examples = [
            {
                'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
                'candidate_labels': ['cat', 'remote', 'couch'],
            }
        ]
        return object_detector, examples

    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector(examples[0], threshold=0.0)

        n = len(outputs)
        self.assertGreater(n, 0)
        self.assertEqual(
            outputs,
            [
                {
                    'score': ANY(float),
                    'label': ANY(str),
                    'box': {'xmin': ANY(int), 'ymin': ANY(int), 'xmax': ANY(int), 'ymax': ANY(int)},
                }
                for i in range(n)
            ],
        )

    @require_tf
    @unittest.skip('Zero Shot Object Detection not implemented in TF')
    def test_small_model_tf(self):
        pass

    @require_torch
    def test_small_model_pt(self):
        object_detector = pipeline(
            'zero-shot-object-detection', model='hf-internal-testing/tiny-random-owlvit-object-detection'
        )

        outputs = object_detector(
            './tests/fixtures/tests_samples/COCO/000000039769.png',
            candidate_labels=['cat', 'remote', 'couch'],
            threshold=0.64,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {'score': 0.7235, 'label': 'cat', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
                {'score': 0.7218, 'label': 'remote', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
                {'score': 0.7184, 'label': 'couch', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
                {'score': 0.6748, 'label': 'remote', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
                {'score': 0.6656, 'label': 'cat', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
                {'score': 0.6614, 'label': 'couch', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
                {'score': 0.6456, 'label': 'remote', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
                {'score': 0.642, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 274, 'xmax': 93, 'ymax': 297}},
                {'score': 0.6419, 'label': 'cat', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
            ],
        )

        outputs = object_detector(
            [
                {
                    'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
                    'candidate_labels': ['cat', 'remote', 'couch'],
                }
            ],
            threshold=0.64,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {'score': 0.7235, 'label': 'cat', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
                    {'score': 0.7218, 'label': 'remote', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
                    {'score': 0.7184, 'label': 'couch', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
                    {'score': 0.6748, 'label': 'remote', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
                    {'score': 0.6656, 'label': 'cat', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
                    {'score': 0.6614, 'label': 'couch', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
                    {'score': 0.6456, 'label': 'remote', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
                    {'score': 0.642, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 274, 'xmax': 93, 'ymax': 297}},
                    {'score': 0.6419, 'label': 'cat', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
                ]
            ],
        )

    @require_torch
    @slow
    def test_large_model_pt(self):
        object_detector = pipeline('zero-shot-object-detection')

        outputs = object_detector(
            'http://images.cocodataset.org/val2017/000000039769.jpg',
            candidate_labels=['cat', 'remote', 'couch'],
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {'score': 0.2868, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
                {'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
                {'score': 0.2537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
                {'score': 0.1474, 'label': 'remote', 'box': {'xmin': 335, 'ymin': 74, 'xmax': 371, 'ymax': 187}},
                {'score': 0.1208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}},
            ],
        )

        outputs = object_detector(
            [
                {
                    'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
                    'candidate_labels': ['cat', 'remote', 'couch'],
                },
                {
                    'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
                    'candidate_labels': ['cat', 'remote', 'couch'],
                },
            ],
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {'score': 0.2868, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
                    {'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
                    {'score': 0.2537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
                    {'score': 0.1474, 'label': 'remote', 'box': {'xmin': 335, 'ymin': 74, 'xmax': 371, 'ymax': 187}},
                    {'score': 0.1208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}},
                ],
                [
                    {'score': 0.2868, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
                    {'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
                    {'score': 0.2537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
                    {'score': 0.1474, 'label': 'remote', 'box': {'xmin': 335, 'ymin': 74, 'xmax': 371, 'ymax': 187}},
                    {'score': 0.1208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}},
                ],
            ],
        )

    @require_tf
    @unittest.skip('Zero Shot Object Detection not implemented in TF')
    def test_large_model_tf(self):
        pass

    @require_torch
    @slow
    def test_threshold(self):
        threshold = 0.2
        object_detector = pipeline('zero-shot-object-detection')

        outputs = object_detector(
            'http://images.cocodataset.org/val2017/000000039769.jpg',
            candidate_labels=['cat', 'remote', 'couch'],
            threshold=threshold,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {'score': 0.2868, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
                {'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
                {'score': 0.2537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
            ],
        )

    @require_torch
    @slow
    def test_top_k(self):
        top_k = 2
        object_detector = pipeline('zero-shot-object-detection')

        outputs = object_detector(
            'http://images.cocodataset.org/val2017/000000039769.jpg',
            candidate_labels=['cat', 'remote', 'couch'],
            top_k=top_k,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {'score': 0.2868, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
                {'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
            ],
        )
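

# Hedged note (illustration only; the repository path is an assumption): a
# single case from this module can be run through pytest, e.g.
#
#     python -m pytest tests/pipelines/test_pipelines_zero_shot_object_detection.py -k test_small_model_pt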
def manhattan_distance(point_a: list, point_b: list) -> float:
    """
    Expects two lists of numbers and returns the Manhattan distance between them.

    >>> manhattan_distance([1, 1], [2, 2])
    2.0
    >>> manhattan_distance([1.5, 1.5], [2, 2])
    1.0
    """
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list) -> None:
    """Raise a TypeError/ValueError unless ``point`` is a non-empty list of numbers."""
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    """
    Same as manhattan_distance, written as a single generator expression.

    >>> manhattan_distance_one_liner([1, 1], [2, 2])
    2.0
    """
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
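

# Small usage sketch for the helpers above: distances follow directly from the
# definition (sum of coordinate-wise absolute differences), and malformed input
# raises before any arithmetic happens.
def _example_manhattan():
    assert manhattan_distance([1, 1], [9, 9]) == 16.0
    try:
        manhattan_distance([1, 1], [1, 2, 3])
    except ValueError as err:
        assert "same n-dimensional space" in str(err)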
if __name__ == "__main__":
import doctest
doctest.testmod()
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
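

# Hedged usage sketch (illustration only): joint text+image preprocessing with
# the processor above. The checkpoint id is an assumption for illustration.
def _example_chinese_clip_processor(image):
    processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
    # Tokenizes the text and converts the image to pixel_values in one call.
    return processor(text=["一只猫"], images=image, return_tensors="pt")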
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


def evaluation_loop(accelerator, model, eval_dataloader, metric):
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"])
        )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions,
            references=references,
        )

    eval_metric = metric.compute()
    return eval_metric["accuracy"]


def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs

    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch

    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizers's lr:", optimizer.param_groups[0]["lr"])
        with open(os.path.join(args.output_dir, f"state_{starting_epoch-1}.json"), "r") as f:
            resumed_state = json.load(f)
            assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
            assert (
                resumed_state["lr"] == lr_scheduler.get_lr()[0]
            ), "Scheduler learning rate mismatch, loading from checkpoint failed"
            assert (
                resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
            ), "Optimizer learning rate mismatch, loading from checkpoint failed"
            assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
            return

    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            overall_step += 1
        output_dir = os.path.join(args.output_dir, f"epoch_{epoch}")
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["step"] = overall_step
        accelerator.print(f"epoch {epoch}:", state)

        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f:
                json.dump(state, f)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--partial_train_epoch",
        type=int,
        default=None,
        help="If passed, the training will stop after this number of epochs.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=2,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
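
# Hedged usage sketch (not part of the original script): a typical
# train-then-resume cycle driven through the accelerate CLI; the file name and
# checkpoint directory below are illustrative.
#
#     accelerate launch checkpointing.py --output_dir outputs --num_epochs 2
#     accelerate launch checkpointing.py --output_dir outputs --resume_from_checkpoint outputs/epoch_0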
from ..utils import DummyObject, requires_backends
class DPMSolverSDEScheduler(metaclass=DummyObject):
    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "torchsde"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool


class TextSummarizationTool(PipelineTool):
    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
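

# Hedged usage sketch (illustration only): encode -> forward -> decode is the
# PipelineTool call protocol, so the tool can simply be called on a string.
# Instantiating it downloads the default checkpoint named above.
def _example_text_summarization_tool():
    tool = TextSummarizationTool()
    return tool("Alice: Lunch at noon? Bob: Works for me. Alice: Great, see you then.")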
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "BridgeTower/bridgetower-base": "https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json",
    "BridgeTower/bridgetower-base-itm-mlm": (
        "https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"
    ),
}


class BridgeTowerVisionConfig(PretrainedConfig):
    model_type = "bridgetower_vision_model"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_channels=3,
        patch_size=16,
        image_size=288,
        initializer_factor=1,
        layer_norm_eps=1e-05,
        stop_gradient=False,
        share_layernorm=True,
        remove_last_layer=False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.stop_gradient = stop_gradient
        self.share_layernorm = share_layernorm
        self.remove_last_layer = remove_last_layer

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class BridgeTowerTextConfig(PretrainedConfig):
    model_type = "bridgetower_text_model"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        initializer_factor=1,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_factor = initializer_factor
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class BridgeTowerConfig(PretrainedConfig):
    model_type = "bridgetower"

    def __init__(
        self,
        share_cross_modal_transformer_layers=True,
        hidden_act="gelu",
        hidden_size=768,
        initializer_factor=1,
        layer_norm_eps=1e-05,
        share_link_tower_layers=False,
        link_tower_type="add",
        num_attention_heads=12,
        num_hidden_layers=6,
        tie_word_embeddings=False,
        init_layernorm_from_vision_encoder=False,
        text_config=None,
        vision_config=None,
        **kwargs,
    ):
        # These were only used for loading older Hub files; popped so they don't reach super().__init__.
        _ = kwargs.pop("text_config_dict", None)
        _ = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)
        self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers
        self.hidden_act = hidden_act
        self.hidden_size = hidden_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.share_link_tower_layers = share_link_tower_layers
        self.link_tower_type = link_tower_type
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.tie_word_embeddings = tie_word_embeddings
        self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.")

        self.text_config = BridgeTowerTextConfig(**text_config)
        self.vision_config = BridgeTowerVisionConfig(**vision_config)

    @classmethod
    def from_text_vision_configs(
        cls, text_config: BridgeTowerTextConfig, vision_config: BridgeTowerVisionConfig, **kwargs
    ):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
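

# Hedged usage sketch (added for illustration): composing the full
# configuration from the two sub-configurations via the classmethod above.
def _example_bridgetower_config():
    text_config = BridgeTowerTextConfig(num_hidden_layers=6)
    vision_config = BridgeTowerVisionConfig(num_hidden_layers=6)
    return BridgeTowerConfig.from_text_vision_configs(text_config, vision_config)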
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)


def string_to_bool(v):
    """Parse common yes/no spellings into a bool, for use as an argparse `type=`."""
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )


def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    """Map an argparse string back onto the original (possibly non-string) choice."""
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)


def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    """Convenience wrapper around dataclasses.field that stores argparse hints in the metadata."""
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help

    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)


class HfArgumentParser(ArgumentParser):
    """An ArgumentParser that builds its arguments from dataclass type hints."""

    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)

    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(field.type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the current kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)

    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)

    def parse_args_into_dataclasses(
        self,
        args=None,
        return_remaining_strings=False,
        look_for_args_file=True,
        args_filename=None,
        args_file_flag=None,
    ) -> Tuple[DataClass, ...]:
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]

        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")

            return (*outputs,)

    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)

    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)

    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
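

# Hedged usage sketch (added for illustration): a minimal dataclass parsed into
# typed arguments by the parser above. `_ExampleArgs` is a hypothetical name.
@dataclasses.dataclass
class _ExampleArgs:
    lr: float = 3e-4
    epochs: int = HfArg(default=2, help="Number of passes over the data.")


def _example_hf_argument_parser():
    parser = HfArgumentParser(_ExampleArgs)
    (example_args,) = parser.parse_args_into_dataclasses(args=["--lr", "1e-4"])
    assert example_args.lr == 1e-4 and example_args.epochs == 2
    return example_args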
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class BertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertTokenizer
    rust_tokenizer_class = BertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            '[UNK]',
            '[CLS]',
            '[SEP]',
            '[PAD]',
            '[MASK]',
            'want',
            '##want',
            '##ed',
            'wa',
            'un',
            'runn',
            '##ing',
            ',',
            'low',
            'lowest',
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = 'UNwant\u00E9d,running'
        output_text = 'unwanted, running'
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize('UNwant\u00E9d,running')
        self.assertListEqual(tokens, ['un', '##want', '##ed', ',', 'runn', '##ing'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = 'UNwant\u00E9d,running'

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = 'UNwant\u00E9d,running'

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz'), ['ah', '\u535A', '\u63A8', 'zz'])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how  \n Are yoU?  '), ['hello', '!', 'how', 'are', 'you', '?'])
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo'), ['hello'])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how  \n Are yoU?  '), ['hällo', '!', 'how', 'are', 'you', '?'])
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo'), ['h\u00E9llo'])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how  \n Are yoU?  '), ['hallo', '!', 'how', 'are', 'you', '?'])
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo'), ['hello'])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how  \n Are yoU?  '), ['hallo', '!', 'how', 'are', 'you', '?'])
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo'), ['hello'])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how  \n Are yoU?  '), ['HeLLo', '!', 'how', 'Are', 'yoU', '?'])

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how  \n Are yoU?  '), ['HäLLo', '!', 'how', 'Are', 'yoU', '?'])

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how  \n Are yoU?  '), ['HaLLo', '!', 'how', 'Are', 'yoU', '?'])

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=['[UNK]'])
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how  \n Are yoU? [UNK]'), ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'])

    def test_basic_tokenizer_splits_on_punctuation(self):
        tokenizer = BasicTokenizer()
        text = "a\n'll !!to?'d of, can't."
        expected = ['a', "'", 'll', '!', '!', 'to', '?', "'", 'd', 'of', ',', 'can', "'", 't', '.']
        self.assertListEqual(tokenizer.tokenize(text), expected)

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token='[UNK]')

        self.assertListEqual(tokenizer.tokenize(''), [])
        self.assertListEqual(tokenizer.tokenize('unwanted running'), ['un', '##want', '##ed', 'runn', '##ing'])
        self.assertListEqual(tokenizer.tokenize('unwantedX running'), ['[UNK]', 'runn', '##ing'])

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(' '))
        self.assertTrue(_is_whitespace('\t'))
        self.assertTrue(_is_whitespace('\r'))
        self.assertTrue(_is_whitespace('\n'))
        self.assertTrue(_is_whitespace('\u00A0'))

        self.assertFalse(_is_whitespace('A'))
        self.assertFalse(_is_whitespace('-'))

    def test_is_control(self):
        self.assertTrue(_is_control('\u0005'))

        self.assertFalse(_is_control('A'))
        self.assertFalse(_is_control(' '))
        self.assertFalse(_is_control('\t'))
        self.assertFalse(_is_control('\r'))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation('-'))
        self.assertTrue(_is_punctuation('$'))
        self.assertTrue(_is_punctuation('`'))
        self.assertTrue(_is_punctuation('.'))

        self.assertFalse(_is_punctuation('A'))
        self.assertFalse(_is_punctuation(' '))

    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ['Test', '\xad', 'test']], [['[UNK]'], [], ['[UNK]']])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ['Test', '\xad', 'test']], [['[UNK]'], [], ['[UNK]']])

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained('bert-base-uncased')

        text = tokenizer.encode('sequence builders', add_special_tokens=False)
        text_2 = tokenizer.encode('multi-sequence build', add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]

    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, 'do_lower_case') else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), 'A'),
                        ((1, 2), ','),
                        ((3, 5), 'na'),
                        ((5, 6), '##ï'),
                        ((6, 8), '##ve'),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), 'Allen'),
                        ((21, 23), '##NL'),
                        ((23, 24), '##P'),
                        ((25, 33), 'sentence'),
                        ((33, 34), '.'),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), 'a'),
                        ((1, 2), ','),
                        ((3, 8), 'naive'),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), 'allen'),
                        ((21, 23), '##nl'),
                        ((23, 24), '##p'),
                        ((25, 33), 'sentence'),
                        ((33, 34), '.'),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens['input_ids']))
                self.assertEqual([e[0] for e in expected_results], tokens['offset_mapping'])

    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ['的', '人', '有']
        text_with_chinese_char = ''.join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                kwargs['tokenize_chinese_chars'] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs['tokenize_chinese_chars'] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f'##{token}' if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
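

# Hedged note (illustration only; the repository path is an assumption): a
# single case from this module can be run through pytest, e.g.
#
#     python -m pytest tests/models/bert/test_tokenization_bert.py -k test_full_tokenizer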
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }


@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
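
# --- Illustrative usage sketch (not part of the original test file) -----------
# What the processor under test does end to end: resize and normalize an image
# into a (batch, channels, height, width) float tensor. The sizes mirror the
# tester defaults above; the dummy image is fabricated for illustration only.
if __name__ == "__main__":
    import numpy as np
    from transformers import DPTImageProcessor

    processor = DPTImageProcessor(
        size={"height": 18, "width": 18}, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]
    )
    dummy = (np.random.rand(30, 30, 3) * 255).astype(np.uint8)  # HWC uint8 image
    pixel_values = processor(dummy, return_tensors="pt").pixel_values
    print(pixel_values.shape)  # torch.Size([1, 3, 18, 18])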
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id, token=None):
    """Extract job names and their job links in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}

    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


def get_artifacts_links(workflow_run_id, token=None):
    """Get all artifact links from a workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}

    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})

        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download an artifact zip; the artifact URL only yields a redirect, so fetch
    the `Location` header first and then follow it."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)
def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors from a downloaded artifact (in .zip format)."""
    errors = []
    failed_tests = []
    job_name = None

    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line

    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )

    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)

    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]
    return result


def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from all artifact files in `artifact_dir`."""
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))
    return errors


def reduce_by_error(logs, error_filter=None):
    """Count each error and collect the tests that failed with it."""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
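
# Illustration (hypothetical data) of the structure `reduce_by_error` returns,
# where each log entry has the form [error_line, error, failed_test, job_link]:
#
#   logs = [
#       ["tests/a.py:1", "AssertionError", "tests/a.py::test_x", None],
#       ["tests/b.py:2", "AssertionError", "tests/b.py::test_y", None],
#   ]
#   reduce_by_error(logs)
#   # -> {"AssertionError": {"count": 2,
#   #        "failed_tests": [("tests/a.py::test_x", "tests/a.py:1"),
#   #                         ("tests/b.py::test_y", "tests/b.py:2")]}}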
def get_model(test):
    """Get the model name from a test method path like `tests/models/bert/...::test_x`."""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        model = test.split("/")[2]
    else:
        model = None
    return model


def reduce_by_model(logs, error_filter=None):
    """Map each model to its errors, with the error counts."""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}

    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def make_github_table(reduced_by_error):
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)
    return "\n".join(lines)


def make_github_table_per_model(reduced_by_model):
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)
    return "\n".join(lines)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    args = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)

    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                index = k.find(" / ")
                k = k[index + len(" / ") :]
            job_links[k] = v
    with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
        json.dump(job_links, fp, ensure_ascii=False, indent=4)

    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
    with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
        json.dump(artifacts, fp, ensure_ascii=False, indent=4)

    for idx, (name, url) in enumerate(artifacts.items()):
        download_artifact(name, url, args.output_dir, args.token)
        # Be gentle to GitHub
        time.sleep(1)

    errors = get_all_errors(args.output_dir, job_links=job_links)

    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])

    # print the top 30 most common test errors
    most_common = counter.most_common(30)
    for item in most_common:
        print(item)

    with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
        json.dump(errors, fp, ensure_ascii=False, indent=4)

    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)

    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)

    with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s1)
    with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s2)
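
# Example invocation (illustrative; the script filename, run id and token are
# assumptions -- you need a real workflow run id and a token with the
# `actions:read` scope):
#
#   python get_ci_error_statistics.py \
#       --workflow_run_id 1234567890 \
#       --output_dir ci_errors \
#       --token $GITHUB_TOKEN
#
# The script then writes job_links.json, artifacts.json, errors.json and the two
# markdown tables (reduced_by_error.txt, reduced_by_model.txt) into `output_dir`.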
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class YolosModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=[30, 30],
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        n_targets=8,
        num_detection_tokens=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.n_targets = n_targets
        self.num_detection_tokens = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        self.expected_seq_len = num_patches + 1 + self.num_detection_tokens

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])

        labels = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            labels = []
            for i in range(self.batch_size):
                target = {}
                target["class_labels"] = torch.randint(
                    high=self.num_labels, size=(self.n_targets,), device=torch_device
                )
                target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device)
                labels.append(target)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return YolosConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            num_detection_tokens=self.num_detection_tokens,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = YolosModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size)
        )

    def create_and_check_for_object_detection(self, config, pixel_values, labels):
        model = YolosForObjectDetection(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values=pixel_values)
        result = model(pixel_values)

        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

        result = model(pixel_values=pixel_values, labels=labels)

        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class YolosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False

    # special case for object detection head model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "YolosForObjectDetection":
                labels = []
                for i in range(self.model_tester.batch_size):
                    target = {}
                    target["class_labels"] = torch.ones(
                        size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long
                    )
                    target["boxes"] = torch.ones(
                        self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float
                    )
                    labels.append(target)
                inputs_dict["labels"] = labels

        return inputs_dict

    def setUp(self):
        self.model_tester = YolosModelTester(self)
        self.config_tester = ConfigTester(self, config_class=YolosConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_inputs_embeds(self):
        # YOLOS does not use inputs_embeds
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        # in YOLOS, the seq_len is different
        seq_len = self.model_tester.expected_seq_len
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions

            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            # YOLOS has a different seq_length
            seq_length = self.model_tester.expected_seq_len

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_object_detection(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_object_detection(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = YolosModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class YolosModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("hustvl/yolos-small") if is_vision_available() else None

    @slow
    def test_inference_object_detection_head(self):
        model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(inputs.pixel_values)

        # verify outputs
        expected_shape = torch.Size((1, 100, 92))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]],
            device=torch_device,
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]], device=torch_device
        )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4))
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))

        # verify postprocessing
        results = image_processor.post_process_object_detection(
            outputs, threshold=0.3, target_sizes=[image.size[::-1]]
        )[0]
        expected_scores = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(torch_device)
        expected_labels = [75, 75, 17, 63, 17]
        expected_slice_boxes = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(torch_device)

        self.assertEqual(len(results["scores"]), 5)
        self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4))
        self.assertSequenceEqual(results["labels"].tolist(), expected_labels)
        self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes))
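
# --- Illustrative sketch (not part of the original test file) -----------------
# The sequence length the tests above rely on: YOLOS prepends a [CLS] token and
# appends `num_detection_tokens` learnable detection tokens to the patch tokens.
# The values mirror the tester defaults; the helper name is chosen here for
# illustration only.
def _expected_yolos_seq_len(image_size=(30, 30), patch_size=2, num_detection_tokens=10):
    num_patches = (image_size[0] // patch_size) * (image_size[1] // patch_size)
    return num_patches + 1 + num_detection_tokens


if __name__ == "__main__":
    print(_expected_yolos_seq_len())  # 15 * 15 + 1 + 10 = 236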
import operator as op
SCALER_NAME = "scaler.pt"
MODEL_NAME = "pytorch_model"
RNG_STATE_NAME = "random_states"
OPTIMIZER_NAME = "optimizer"
SCHEDULER_NAME = "scheduler"
WEIGHTS_NAME = "pytorch_model.bin"
WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
SAFE_WEIGHTS_NAME = "model.safetensors"
SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json"
SAGEMAKER_PYTORCH_VERSION = "1.10.2"
SAGEMAKER_PYTHON_VERSION = "py38"
SAGEMAKER_TRANSFORMERS_VERSION = "4.17.0"
SAGEMAKER_PARALLEL_EC2_INSTANCES = ["ml.p3.16xlarge", "ml.p3dn.24xlarge", "ml.p4dn.24xlarge"]
FSDP_SHARDING_STRATEGY = ["FULL_SHARD", "SHARD_GRAD_OP", "NO_SHARD", "HYBRID_SHARD", "HYBRID_SHARD_ZERO2"]
FSDP_AUTO_WRAP_POLICY = ["TRANSFORMER_BASED_WRAP", "SIZE_BASED_WRAP", "NO_WRAP"]
FSDP_BACKWARD_PREFETCH = ["BACKWARD_PRE", "BACKWARD_POST", "NO_PREFETCH"]
FSDP_STATE_DICT_TYPE = ["FULL_STATE_DICT", "LOCAL_STATE_DICT", "SHARDED_STATE_DICT"]
FSDP_PYTORCH_VERSION = "2.0.1"
DEEPSPEED_MULTINODE_LAUNCHERS = ["pdsh", "standard", "openmpi", "mvapich"]
TORCH_DYNAMO_MODES = ["default", "reduce-overhead", "max-autotune"]
STR_OPERATION_TO_FUNC = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt}

# These are the args for `torch.distributed.launch` for pytorch < 1.9
TORCH_LAUNCH_PARAMS = [
    "nnodes",
    "nproc_per_node",
    "rdzv_backend",
    "rdzv_endpoint",
    "rdzv_id",
    "rdzv_conf",
    "standalone",
    "max_restarts",
    "monitor_interval",
    "start_method",
    "role",
    "module",
    "m",
    "no_python",
    "run_path",
    "log_dir",
    "r",
    "redirects",
    "t",
    "tee",
    "node_rank",
    "master_addr",
    "master_port",
]

CUDA_DISTRIBUTED_TYPES = ["DEEPSPEED", "MULTI_GPU", "FSDP", "MEGATRON_LM"]
# NOTE: the original identifier for the constant below is unclear from the
# source; this name is a descriptive guess, not necessarily the library's own.
XPU_DISTRIBUTED_TYPES = ["DEEPSPEED", "MULTI_XPU", "FSDP"]
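
# Illustrative use of STR_OPERATION_TO_FUNC (not part of the original module):
# mapping an operator string onto a comparison callable, e.g. for version gating.
if __name__ == "__main__":
    op_fn = STR_OPERATION_TO_FUNC[">="]
    print(op_fn((2, 0, 1), (1, 10, 2)))  # True: 2.0.1 >= 1.10.2 as version tuples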
"""simple docstring"""
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
logger = logging.get_logger(__name__)


def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]


def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str] = None):
    """Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes."""
    tesseract_config = tesseract_config if tesseract_config is not None else ""

    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes


class LayoutLMv2ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
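
# --- Illustrative sketch (not part of the original module) --------------------
# The box normalization above maps pixel boxes into the 0-1000 coordinate space
# the LayoutLM family expects, independent of image resolution:
if __name__ == "__main__":
    # a 100x50 px box at (10, 20) in a 200x100 image, scaled to thousandths
    print(normalize_box([10, 20, 110, 70], width=200, height=100))
    # [50, 200, 550, 700]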
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING

    @require_torch
    def test_small_model_pt(self):
        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="pt")
        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                        " oscope. FiliFili@@"
                    )
                }
            ],
        )

        outputs = text_generator(["This is a test", "This is a second test"])
        self.assertEqual(
            outputs,
            [
                [
                    {
                        "generated_text": (
                            "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                            " oscope. FiliFili@@"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"
                            " oscope. oscope. FiliFili@@"
                        )
                    }
                ],
            ],
        )

        outputs = text_generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(list)},
                {"generated_token_ids": ANY(list)},
            ],
        )

        text_generator.tokenizer.pad_token_id = text_generator.model.config.eos_token_id
        text_generator.tokenizer.pad_token = "<pad>"
        outputs = text_generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(list)},
                    {"generated_token_ids": ANY(list)},
                ],
                [
                    {"generated_token_ids": ANY(list)},
                    {"generated_token_ids": ANY(list)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="tf")
        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                        " please,"
                    )
                }
            ],
        )

        outputs = text_generator(["This is a test", "This is a second test"], do_sample=False)
        self.assertEqual(
            outputs,
            [
                [
                    {
                        "generated_text": (
                            "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                            " please,"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"
                            " Cannes 閲閲Cannes Cannes Cannes 攵 please,"
                        )
                    }
                ],
            ],
        )

    def get_test_pipeline(self, model, tokenizer, processor):
        text_generator = TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return text_generator, ["This is a test", "Another test"]

    def test_stop_sequence_stopping_criteria(self):
        prompt = "Hello I believe in"
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        output = text_generator(prompt)
        self.assertEqual(
            output,
            [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}],
        )

        output = text_generator(prompt, stop_sequence=" fe")
        self.assertEqual(output, [{"generated_text": "Hello I believe in fe"}])

    def run_pipeline_test(self, text_generator, _):
        model = text_generator.model
        tokenizer = text_generator.tokenizer

        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))

        outputs = text_generator("This is a test", return_full_text=False)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])

        text_generator = pipeline(task="text-generation", model=model, tokenizer=tokenizer, return_full_text=False)
        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])

        outputs = text_generator("This is a test", return_full_text=True)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))

        outputs = text_generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        if text_generator.tokenizer.pad_token is not None:
            outputs = text_generator(
                ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
            )
            self.assertEqual(
                outputs,
                [
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                ],
            )

        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_text=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_tensors=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_text=True, return_tensors=True)

        # Empty prompt is slightly special
        # it requires BOS token to exist.
        # Special case for Pegasus which will always append EOS so will
        # work even without BOS.
        if (
            text_generator.tokenizer.bos_token_id is not None
            or "Pegasus" in tokenizer.__class__.__name__
            or "Git" in model.__class__.__name__
        ):
            outputs = text_generator("")
            self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        else:
            with self.assertRaises((ValueError, AssertionError)):
                outputs = text_generator("")

        if text_generator.framework == "tf":
            # TF generation does not support max_new_tokens, and it's impossible
            # to control long generation with only max_length without
            # fancy calculation, dismissing tests for now.
            return

        # We don't care about infinite range models.
        # They already work.
        # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
        EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS = ["RwkvForCausalLM", "XGLMForCausalLM", "GPTNeoXForCausalLM"]
        if (
            tokenizer.model_max_length < 10000
            and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
        ):
            # Handling of large generations
            with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError)):
                text_generator("This is a test" * 500, max_new_tokens=20)

            outputs = text_generator("This is a test" * 500, handle_long_generation="hole", max_new_tokens=20)
            # Hole strategy cannot work
            with self.assertRaises(ValueError):
                text_generator(
                    "This is a test" * 500,
                    handle_long_generation="hole",
                    max_new_tokens=tokenizer.model_max_length + 10,
                )

    @require_torch
    @require_accelerate
    @require_torch_gpu
    def test_small_model_pt_bloom_accelerate(self):
        import torch

        # Classic `model_kwargs`
        pipe = pipeline(
            model="hf-internal-testing/tiny-random-bloom",
            model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloat16},
        )
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

        # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.bfloat16)
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

        # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto")
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.float32)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

    @require_torch
    @require_torch_gpu
    def test_small_model_fp16(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device=0, torch_dtype=torch.float16)
        pipe("This is a test")

    @require_torch
    @require_accelerate
    @require_torch_gpu
    def test_pipeline_accelerate_top_p(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.float16)
        pipe("This is a test", do_sample=True, top_p=0.5)

    def test_pipeline_length_setting_warning(self):
        prompt = "Hello world"
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        if text_generator.model.framework == "tf":
            logger = logging.get_logger("transformers.generation.tf_utils")
        else:
            logger = logging.get_logger("transformers.generation.utils")
        logger_msg = "Both `max_new_tokens`"  # The beginning of the message to be checked in this test

        # Both are set by the user -> log warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10, max_new_tokens=1)
        self.assertIn(logger_msg, cl.out)

        # The user only sets one -> no warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_new_tokens=1)
        self.assertNotIn(logger_msg, cl.out)

        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10)
        self.assertNotIn(logger_msg, cl.out)
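
# --- Illustrative usage sketch (not part of the original test file) -----------
# The two generation-length controls the last test distinguishes: `max_length`
# counts prompt + new tokens, `max_new_tokens` counts new tokens only, and
# setting both triggers the warning captured above. The model is the same tiny
# test checkpoint used in the tests.
if __name__ == "__main__":
    from transformers import pipeline

    generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
    print(generator("Hello world", max_new_tokens=5)[0]["generated_text"])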
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return LlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = LlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = LlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class a ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
__lowerCAmelCase : List[Any] = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
__lowerCAmelCase : Optional[int] = (LlamaForCausalLM,) if is_torch_available() else ()
__lowerCAmelCase : List[str] = (
{
"""feature-extraction""": LlamaModel,
"""text-classification""": LlamaForSequenceClassification,
"""text-generation""": LlamaForCausalLM,
"""zero-shot""": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__lowerCAmelCase : str = False
__lowerCAmelCase : Any = False
def __lowerCamelCase ( self :List[Any] ):
snake_case__ : Any = LlamaModelTester(self )
snake_case__ : Dict = ConfigTester(self ,config_class=__lowercase ,hidden_size=3_7 )
def __lowerCamelCase ( self :Dict ):
self.config_tester.run_common_tests()
def __lowerCamelCase ( self :Tuple ):
snake_case__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
def __lowerCamelCase ( self :Optional[Any] ):
snake_case__ : str = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case__ : Union[str, Any] = type
self.model_tester.create_and_check_model(*__lowercase )
def __lowerCamelCase ( self :List[str] ):
snake_case__ , snake_case__ : int = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Dict = 3
snake_case__ : Union[str, Any] = input_dict['''input_ids''']
snake_case__ : Tuple = input_ids.ne(1 ).to(__lowercase )
snake_case__ : List[Any] = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
snake_case__ : Union[str, Any] = LlamaForSequenceClassification(__lowercase )
model.to(__lowercase )
model.eval()
snake_case__ : List[str] = model(__lowercase ,attention_mask=__lowercase ,labels=__lowercase )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
    def test_llama_sequence_classification_model_for_single_label( self ):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = '''single_label_classification'''
        input_ids = input_dict['''input_ids''']
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
        model = LlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,attention_mask=attention_mask ,labels=sequence_labels )
        self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
    def test_llama_sequence_classification_model_for_multi_label( self ):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = '''multi_label_classification'''
        input_ids = input_dict['''input_ids''']
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float )
        model = LlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,attention_mask=attention_mask ,labels=sequence_labels )
        self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
    @unittest.skip('''LLaMA buffers include complex numbers, which breaks this test''' )
    def test_save_load_fast_init_from_base( self ):
        pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
    def test_model_rope_scaling( self ,scaling_type ):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 1_0] ,config.vocab_size )
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size )
        set_seed(4_2 )  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config )
        original_model.to(torch_device )
        original_model.eval()
        original_short_output = original_model(short_input ).last_hidden_state
        original_long_output = original_model(long_input ).last_hidden_state
        set_seed(4_2 )  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {'''type''': scaling_type, '''factor''': 10.0}
        scaled_model = LlamaModel(config )
        scaled_model.to(torch_device )
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input ).last_hidden_state
        scaled_long_output = scaled_model(long_input ).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output ,scaled_short_output ,atol=1e-5 ) )
        else:
            self.assertFalse(torch.allclose(original_short_output ,scaled_short_output ,atol=1e-5 ) )
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output ,scaled_long_output ,atol=1e-5 ) )
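# The two RoPE scaling strategies exercised above differ in when they modify the
# embedding: "linear" always divides the position index by the scaling factor,
# while "dynamic" (NTK-style) leaves inputs shorter than the original maximum
# length untouched and instead enlarges the rotary base for longer ones. Below is
# a minimal, self-contained sketch of the linear variant (illustrative only, not
# the transformers implementation; the function name is ours):
def _linear_scaled_rope_angles(num_positions ,dim ,base=10000.0 ,factor=10.0 ):
    inv_freq = 1.0 / (base ** (torch.arange(0 ,dim ,2 ).float() / dim))
    positions = torch.arange(num_positions ).float() / factor  # the only change vs. vanilla RoPE
    return torch.outer(positions ,inv_freq )  # angles that are fed to sin/cos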
@require_torch
class LlamaIntegrationTest(unittest.TestCase ):
    @unittest.skip('''Logits are not exactly the same, once we fix the instabilities somehow, will update!''' )
    @slow
    def test_model_7b_logits( self ):
        input_ids = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
        model = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-7b-hf''' ,device_map='''auto''' )
        out = model(torch.tensor([input_ids] ) )
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
        torch.testing.assert_close(out.mean(-1 ) ,EXPECTED_MEAN ,atol=1e-2 ,rtol=1e-2 )
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
        # fmt: on
        torch.testing.assert_close(out[0, 0, :3_0] ,EXPECTED_SLICE ,atol=1e-5 ,rtol=1e-5 )
    @unittest.skip('''Logits are not exactly the same, once we fix the instabilities somehow, will update!''' )
    @slow
    def test_model_13b_logits( self ):
        input_ids = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
        model = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-hf''' ,device_map='''auto''' )
        out = model(torch.tensor(input_ids ) )
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
        torch.testing.assert_close(out.mean(-1 ) ,EXPECTED_MEAN ,atol=1e-2 ,rtol=1e-2 )
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
        # fmt: on
        torch.testing.assert_close(out[0, 0, :3_0] ,EXPECTED_SLICE ,atol=1e-5 ,rtol=1e-5 )
    @unittest.skip('''Logits are not exactly the same, once we fix the instabilities somehow, will update!''' )
    @slow
    def test_model_13b_chat_logits( self ):
        input_ids = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
        model = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' ,device_map='''auto''' )
        out = model(torch.tensor(input_ids ) )
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
        torch.testing.assert_close(out.mean(-1 ) ,EXPECTED_MEAN ,atol=1e-2 ,rtol=1e-2 )
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
        # fmt: on
        torch.testing.assert_close(out[0, 0, :3_0] ,EXPECTED_SLICE ,atol=1e-5 ,rtol=1e-5 )
    @unittest.skip(
        '''Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is going to be a `too_slow` test''' )
    @slow
    def test_model_70b_logits( self ):
        input_ids = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
        model = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-70b-hf''' ,device_map='''auto''' )
        out = model(torch.tensor(input_ids ) )
        EXPECTED_MEAN = torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] ,dtype=torch.float32 )
        torch.testing.assert_close(out.mean(-1 ) ,EXPECTED_MEAN ,atol=1e-2 ,rtol=1e-2 )
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
        # fmt: on
        torch.testing.assert_close(out[0, 0, :3_0] ,EXPECTED_SLICE ,atol=1e-5 ,rtol=1e-5 )
    @unittest.skip('''Model is currently gated''' )
    @slow
    def test_model_13b_greedy_generation( self ):
        EXPECTED_TEXT_COMPLETION = '''Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'''
        prompt = '''Simply put, the theory of relativity states that '''
        tokenizer = LlamaTokenizer.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' )
        input_ids = tokenizer.encode(prompt ,return_tensors='''pt''' )
        model = LlamaForCausalLM.from_pretrained(
            '''meta-llama/Llama-2-13b-chat-hf''' ,device_map='''sequential''' ,use_safetensors=False )
        # greedy generation outputs
        generated_ids = model.generate(input_ids ,max_new_tokens=6_4 ,top_p=None ,temperature=1 ,do_sample=False )
        text = tokenizer.decode(generated_ids[0] ,skip_special_tokens=True )
        self.assertEqual(text ,EXPECTED_TEXT_COMPLETION )
| 219
|
import unittest
from transformers import DonutProcessor
A__ = '''naver-clova-ix/donut-base'''
class DonutProcessorTest(unittest.TestCase ):
    def setUp( self ):
        self.processor = DonutProcessor.from_pretrained(A__ )

    def test_token2json( self ):
        expected_json = {
            '''name''': '''John Doe''',
            '''age''': '''99''',
            '''city''': '''Atlanta''',
            '''state''': '''GA''',
            '''zip''': '''30301''',
            '''phone''': '''123-4567''',
            '''nicknames''': [{'''nickname''': '''Johnny'''}, {'''nickname''': '''JD'''}],
        }
        sequence = (
            '''<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'''
            '''<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'''
            '''<s_nicknames><s_nickname>Johnny</s_nickname>'''
            '''<sep/><s_nickname>JD</s_nickname></s_nicknames>'''
        )
        actual_json = self.processor.token2json(sequence )
        self.assertDictEqual(actual_json ,expected_json )
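# For reference, the inverse mapping (dict -> token sequence) uses the same tag
# scheme seen in `sequence` above. A minimal sketch of that direction (ours, not
# the actual DonutProcessor.json2token implementation):
def _json2token_sketch(obj ):
    if isinstance(obj ,dict ):
        return ''.join(F"<s_{k}>{_json2token_sketch(v )}</s_{k}>" for k, v in obj.items() )
    if isinstance(obj ,list ):
        return '''<sep/>'''.join(_json2token_sketch(v ) for v in obj )
    return str(obj )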
| 219
| 1
|
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.cross_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.cross_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias"""))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_qcontent_proj.weight""", F"""decoder.layers.{i}.sa_qcontent_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_kcontent_proj.weight""", F"""decoder.layers.{i}.sa_kcontent_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_qpos_proj.weight""", F"""decoder.layers.{i}.sa_qpos_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_kpos_proj.weight""", F"""decoder.layers.{i}.sa_kpos_proj.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.sa_v_proj.weight""", F"""decoder.layers.{i}.sa_v_proj.weight"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_qcontent_proj.weight""", F"""decoder.layers.{i}.ca_qcontent_proj.weight""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_kcontent_proj.weight""", F"""decoder.layers.{i}.ca_kcontent_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_kpos_proj.weight""", F"""decoder.layers.{i}.ca_kpos_proj.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.ca_v_proj.weight""", F"""decoder.layers.{i}.ca_v_proj.weight"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight""", F"""decoder.layers.{i}.ca_qpos_sine_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_qcontent_proj.bias""", F"""decoder.layers.{i}.sa_qcontent_proj.bias""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_kcontent_proj.bias""", F"""decoder.layers.{i}.sa_kcontent_proj.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.sa_qpos_proj.bias""", F"""decoder.layers.{i}.sa_qpos_proj.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.sa_kpos_proj.bias""", F"""decoder.layers.{i}.sa_kpos_proj.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.sa_v_proj.bias""", F"""decoder.layers.{i}.sa_v_proj.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_qcontent_proj.bias""", F"""decoder.layers.{i}.ca_qcontent_proj.bias""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_kcontent_proj.bias""", F"""decoder.layers.{i}.ca_kcontent_proj.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.ca_kpos_proj.bias""", F"""decoder.layers.{i}.ca_kpos_proj.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.ca_v_proj.bias""", F"""decoder.layers.{i}.ca_v_proj.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias""", F"""decoder.layers.{i}.ca_qpos_sine_proj.bias""")
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''),
('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''),
('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''),
('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''),
('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''),
('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''),
('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''),
('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''),
('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''),
('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''),
]
)
def rename_key(state_dict , old , new ):
    val = state_dict.pop(old )
    state_dict[new] = val
def rename_backbone_keys(state_dict ):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace('''backbone.0.body''' , '''backbone.conv_encoder.model''' )
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict , is_panoptic=False ):
    prefix = ''
    if is_panoptic:
        prefix = '''conditional_detr.'''
    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
        in_proj_bias = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'''encoder.layers.{i}.self_attn.q_proj.weight'''] = in_proj_weight[:2_56, :]
        state_dict[F'''encoder.layers.{i}.self_attn.q_proj.bias'''] = in_proj_bias[:2_56]
        state_dict[F'''encoder.layers.{i}.self_attn.k_proj.weight'''] = in_proj_weight[2_56:5_12, :]
        state_dict[F'''encoder.layers.{i}.self_attn.k_proj.bias'''] = in_proj_bias[2_56:5_12]
        state_dict[F'''encoder.layers.{i}.self_attn.v_proj.weight'''] = in_proj_weight[-2_56:, :]
        state_dict[F'''encoder.layers.{i}.self_attn.v_proj.bias'''] = in_proj_bias[-2_56:]
def prepare_img():
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name , pytorch_dump_folder_path ):
    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = '''resnet101'''
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = '''panoptic''' in model_name
    if is_panoptic:
        config.num_labels = 2_50
    else:
        config.num_labels = 91
        repo_id = '''huggingface/label-files'''
        filename = '''coco-detection-id2label.json'''
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # load image processor
    format = '''coco_panoptic''' if is_panoptic else '''coco_detection'''
    image_processor = ConditionalDetrImageProcessor(format=format )
    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img , return_tensors='''pt''' )
    pixel_values = encoding['''pixel_values''']
    logger.info(F'''Converting model {model_name}...''' )
    # load original model from torch hub
    conditional_detr = torch.hub.load('''DeppMeng/ConditionalDETR''' , model_name , pretrained=True ).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = '''conditional_detr.''' + src
        rename_key(state_dict , src , dest )
    state_dict = rename_backbone_keys(state_dict )
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict , is_panoptic=is_panoptic )
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = '''conditional_detr.model.''' if is_panoptic else '''model.'''
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith('''conditional_detr''' )
                and not key.startswith('''class_labels_classifier''' )
                and not key.startswith('''bbox_predictor''' )
            ):
                # move base-model keys under the conditional_detr.model. namespace
                val = state_dict.pop(key )
                state_dict['''conditional_detr.model''' + key[len('''conditional_detr''' ):]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key )
                state_dict['''conditional_detr.''' + key] = val
            elif key.startswith('''bbox_attention''' ) or key.startswith('''mask_head''' ):
                continue
            else:
                val = state_dict.pop(key )
                state_dict[prefix + key] = val
        else:
            if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ):
                val = state_dict.pop(key )
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config ) if is_panoptic else ConditionalDetrForObjectDetection(config )
    model.load_state_dict(state_dict )
    model.eval()
    model.push_to_hub(repo_id=model_name , organization='''DepuMeng''' , commit_message='''Add model''' )
    # verify our conversion
    original_outputs = conditional_detr(pixel_values )
    outputs = model(pixel_values )
    assert torch.allclose(outputs.logits , original_outputs['''pred_logits'''] , atol=1e-4 )
    assert torch.allclose(outputs.pred_boxes , original_outputs['''pred_boxes'''] , atol=1e-4 )
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks , original_outputs['''pred_masks'''] , atol=1e-4 )
    # Save model and image processor
    logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
    image_processor.save_pretrained(pytorch_dump_folder_path )
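# Example invocation (the script filename here is illustrative; the conversion
# needs network access for torch.hub and the Hugging Face Hub):
#   python convert_conditional_detr_checkpoint.py \
#       --model_name conditional_detr_resnet50 \
#       --pytorch_dump_folder_path ./conditional_detr_resnet50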
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''conditional_detr_resnet50''',
type=str,
help='''Name of the CONDITIONAL_DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 184
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_efficientnet""": [
"""EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""EfficientNetConfig""",
"""EfficientNetOnnxConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase: Dict = ["""EfficientNetImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_efficientnet"""] = [
"""EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""EfficientNetForImageClassification""",
"""EfficientNetModel""",
"""EfficientNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
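# Note on the lazy-module pattern above: importing the package only registers the
# names listed in `_import_structure`; the heavy torch/vision submodules load on
# first attribute access. For example (illustrative usage):
#   from transformers.models.efficientnet import EfficientNetConfig  # cheap
#   cfg = EfficientNetConfig()  # modeling code is still not imported yet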
| 266
| 0
|
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer , line , max_length , padding_side , pad_to_max_length=True , return_tensors="pt" ):
    extra_kw = {"""add_prefix_space""": True} if isinstance(tokenizer , BartTokenizer ) and not line.startswith(""" """ ) else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line] , max_length=max_length , padding="""max_length""" if pad_to_max_length else None , truncation=True , return_tensors=return_tensors , add_special_tokens=True , **extra_kw , )
def trim_batch(input_ids , pad_token_id , attention_mask=None , ):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id ).any(dim=0 )
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class Seq2SeqDataset(Dataset ):
    def __init__( self , tokenizer , data_dir , max_source_length , max_target_length , type_path="train" , n_obs=None , src_lang=None , tgt_lang=None , prefix="" , ):
        '''simple docstring'''
        super().__init__()
        self.src_file = Path(data_dir ).joinpath(type_path + """.source""" )
        self.tgt_file = Path(data_dir ).joinpath(type_path + """.target""" )
        self.src_lens = self.get_char_lens(self.src_file )
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens ) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
def __len__( self :Tuple ):
'''simple docstring'''
return len(self.src_lens )
    def __getitem__( self , index ):
        '''simple docstring'''
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file ) , index ).rstrip("""\n""" )
        tgt_line = linecache.getline(str(self.tgt_file ) , index ).rstrip("""\n""" )
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer , T5Tokenizer ):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer , RagTokenizer ) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer , RagTokenizer ) else self.tokenizer
        source_inputs = encode_line(source_tokenizer , source_line , self.max_source_length , """right""" )
        target_inputs = encode_line(target_tokenizer , tgt_line , self.max_target_length , """right""" )
        source_ids = source_inputs["""input_ids"""].squeeze()
        target_ids = target_inputs["""input_ids"""].squeeze()
        src_mask = source_inputs["""attention_mask"""].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
    def get_char_lens(data_file ):
        '''simple docstring'''
        return [len(x ) for x in Path(data_file ).open().readlines()]
    def collate_fn( self , batch ):
        '''simple docstring'''
        input_ids = torch.stack([x["""input_ids"""] for x in batch] )
        masks = torch.stack([x["""attention_mask"""] for x in batch] )
        target_ids = torch.stack([x["""decoder_input_ids"""] for x in batch] )
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer , RagTokenizer )
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer , RagTokenizer )
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids , tgt_pad_token_id )
        source_ids, source_mask = trim_batch(input_ids , src_pad_token_id , attention_mask=masks )
        batch = {
"""input_ids""": source_ids,
"""attention_mask""": source_mask,
"""decoder_input_ids""": y,
}
return batch
logger = getLogger(__name__)
def flatten_list(summary_ids ):
    return list(itertools.chain.from_iterable(summary_ids ) )
def save_git_info(folder_path ):
    repo_infos = get_git_info()
    save_json(repo_infos , os.path.join(folder_path , """git_log.json""" ) )
def save_json(content , path , indent=4 , **json_dump_kwargs ):
    with open(path , """w""" ) as f:
        json.dump(content , f , indent=indent , **json_dump_kwargs )
def load_json(path ):
    with open(path ) as f:
        return json.load(f )
def get_git_info():
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        """repo_id""": str(repo ),
        """repo_sha""": str(repo.head.object.hexsha ),
        """repo_branch""": str(repo.active_branch ),
        """hostname""": str(socket.gethostname() ),
    }
    return repo_infos
def lmap(f , x ):
    return list(map(f , x ) )
def pickle_save(obj , path ):
    with open(path , """wb""" ) as f:
        return pickle.dump(obj , f )
def normalize_answer(s ):
    """Lower text and remove punctuation, articles and extra whitespace."""
    def remove_articles(text ):
        return re.sub(R"""\b(a|an|the)\b""" , """ """ , text )
    def white_space_fix(text ):
        return " ".join(text.split() )
    def remove_punc(text ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )
    def lower(text ):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s ) ) ) )
def f1_score(prediction , ground_truth ):
    prediction_tokens = normalize_answer(prediction ).split()
    ground_truth_tokens = normalize_answer(ground_truth ).split()
    common = Counter(prediction_tokens ) & Counter(ground_truth_tokens )
    num_same = sum(common.values() )
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens )
    recall = 1.0 * num_same / len(ground_truth_tokens )
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
def exact_match_score(prediction , ground_truth ):
    return normalize_answer(prediction ) == normalize_answer(ground_truth )
def calculate_exact_match(output_lns , reference_lns ):
    assert len(output_lns ) == len(reference_lns )
    em = 0
    for hypo, pred in zip(output_lns , reference_lns ):
        em += exact_match_score(hypo , pred )
    if len(output_lns ) > 0:
        em /= len(output_lns )
    return {"em": em}
def is_rag_model(model_prefix ):
    return model_prefix.startswith("""rag""" )
def set_extra_model_params(extra_params , hparams , config ):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["""dropout"""] = """dropout_rate"""
    for p in extra_params:
        if getattr(hparams , p , None ):
            if not hasattr(config , p ) and not hasattr(config , equivalent_param[p] ):
                logger.info("""config doesn't have a `{}` attribute""".format(p ) )
                delattr(hparams , p )
                continue
            set_p = p if hasattr(config , p ) else equivalent_param[p]
            setattr(config , set_p , getattr(hparams , p ) )
            delattr(hparams , p )
    return hparams, config
| 367
|
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)
T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
"t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
"t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}
class T5Config(PretrainedConfig ):
    model_type = """t5"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""hidden_size""": """d_model""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""}
    def __init__( self , vocab_size=3_21_28 , d_model=5_12 , d_kv=64 , d_ff=20_48 , num_layers=6 , num_decoder_layers=None , num_heads=8 , relative_attention_num_buckets=32 , relative_attention_max_distance=1_28 , dropout_rate=0.1 , layer_norm_epsilon=1E-6 , initializer_factor=1.0 , feed_forward_proj="relu" , is_encoder_decoder=True , use_cache=True , pad_token_id=0 , eos_token_id=1 , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        act_info = self.feed_forward_proj.split("""-""" )
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == """gated"""
        if len(act_info ) > 1 and act_info[0] != "gated" or len(act_info ) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                """Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. """
                """'gated-gelu' or 'relu'""" )
        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = """gelu_new"""
        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , **kwargs , )
class T5OnnxConfig(OnnxSeq2SeqConfigWithPast ):
    @property
    def inputs( self ):
        '''simple docstring'''
        common_inputs = {
            """input_ids""": {0: """batch""", 1: """encoder_sequence"""},
            """attention_mask""": {0: """batch""", 1: """encoder_sequence"""},
        }
        if self.use_past:
            common_inputs["""attention_mask"""][1] = """past_encoder_sequence + sequence"""
            common_inputs["""decoder_input_ids"""] = {0: """batch"""}
            common_inputs["""decoder_attention_mask"""] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
        else:
            common_inputs["""decoder_input_ids"""] = {0: """batch""", 1: """decoder_sequence"""}
            common_inputs["""decoder_attention_mask"""] = {0: """batch""", 1: """decoder_sequence"""}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction="""inputs""" )
        return common_inputs

    @property
    def default_onnx_opset( self ):
        '''simple docstring'''
        return 13
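# Quick check of the `feed_forward_proj` parsing implemented above (illustrative):
#   T5Config(feed_forward_proj="gated-gelu")  ->  is_gated_act=True,  dense_act_fn="gelu_new"
#   T5Config(feed_forward_proj="relu")        ->  is_gated_act=False, dense_act_fn="relu"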
| 367
| 1
|
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'tokenizer_file': {
'bigscience/tokenizer': 'https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json',
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json',
},
}
class BloomTokenizerFast(PreTrainedTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , unk_token="<unk>" , bos_token="<s>" , eos_token="</s>" , pad_token="<pad>" , add_prefix_space=False , clean_up_tokenization_spaces=False , **kwargs , ):
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , add_prefix_space=add_prefix_space , clean_up_tokenization_spaces=clean_up_tokenization_spaces , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("""add_prefix_space""" , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("""type""" ) )
            pre_tok_state["""add_prefix_space"""] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        is_split_into_words = kwargs.get("""is_split_into_words""" , False )
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'
                """ pretokenized inputs.""" )
        return super()._batch_encode_plus(*args , **kwargs )
    def _encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        is_split_into_words = kwargs.get("""is_split_into_words""" , False )
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'
                """ pretokenized inputs.""" )
        return super()._encode_plus(*args , **kwargs )
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def _build_conversation_input_ids( self , conversation: "Conversation" ) -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
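# Usage sketch (checkpoint name taken from the map above; requires network access):
#   tok = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
#   tok("Hello world")["input_ids"]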
| 433
|
'''simple docstring'''
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def remove_ignore_keys_(state_dict ):
    ignore_keys = [
        '''decoder.version''',
        '''decoder.output_projection.weight''',
        '''_float_tensor''',
        '''decoder.embed_positions._float_tensor''',
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
def make_linear_from_emb(emb ):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path ):
    checkpoint = torch.load(checkpoint_path , map_location='''cpu''' )
    args = Namespace(**checkpoint['''cfg''']['''model'''] )
    state_dict = checkpoint['''model''']
    remove_ignore_keys_(state_dict )
    vocab_size = state_dict['''decoder.embed_tokens.weight'''].shape[0]
    state_dict = {key.replace('''decoder''' , '''model''' ): val for key, val in state_dict.items()}
    config = XGLMConfig(
        vocab_size=vocab_size , max_position_embeddings=args.max_target_positions , num_layers=args.decoder_layers , attention_heads=args.decoder_attention_heads , ffn_dim=args.decoder_ffn_embed_dim , d_model=args.decoder_embed_dim , layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''gelu''' , scale_embedding=not args.no_scale_embedding , tie_word_embeddings=args.share_decoder_input_output_embed , )
    model = XGLMForCausalLM(config )
    missing = model.load_state_dict(state_dict , strict=False )
    print(missing )
    model.lm_head = make_linear_from_emb(model.model.embed_tokens )
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 502
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class Speech2TextConfig(PretrainedConfig ):
    model_type = '''speech_to_text'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
    def __init__( self , vocab_size=1_0_0_0_0 , encoder_layers=1_2 , encoder_ffn_dim=2_0_4_8 , encoder_attention_heads=4 , decoder_layers=6 , decoder_ffn_dim=2_0_4_8 , decoder_attention_heads=4 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , use_cache=True , is_encoder_decoder=True , activation_function="relu" , d_model=2_5_6 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=2 , scale_embedding=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , max_source_positions=6_0_0_0 , max_target_positions=1_0_2_4 , num_conv_layers=2 , conv_kernel_sizes=(5, 5) , conv_channels=1_0_2_4 , input_feat_per_channel=8_0 , input_channels=1 , **kwargs , ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        if len(self.conv_kernel_sizes) != self.num_conv_layers:
            raise ValueError(
                'Configuration for convolutional module is incorrect. '
                'It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` '
                F"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, "
                F"`config.num_conv_layers = {self.num_conv_layers}`.")
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , **kwargs , )
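# Example of the kernel-size validation enforced above (illustrative):
#   Speech2TextConfig(num_conv_layers=2, conv_kernel_sizes=(5,))    # raises ValueError
#   Speech2TextConfig(num_conv_layers=2, conv_kernel_sizes=(5, 5))  # ok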
| 713
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Optional[int] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Union[str, Any] = ["CLIPFeatureExtractor"]
UpperCAmelCase__ : Optional[int] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 676
| 0
|
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b101100111110110010010000011110111011000110011110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class StableDiffusionXLWatermarker:
    def __init__( self ) -> None:
        '''simple docstring'''
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()
        self.encoder.set_watermark("""bits""" , self.watermark )

    def apply_watermark( self , images: torch.FloatTensor ):
        '''simple docstring'''
        # can't encode images that are smaller than 256 pixels
        if images.shape[-1] < 256:
            return images
        images = (255 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        images = [self.encoder.encode(image , """dwtDct""" ) for image in images]
        images = torch.from_numpy(np.array(images ) ).permute(0 , 3 , 1 , 2 )
        images = torch.clamp(2 * (images / 255 - 0.5) , min=-1.0 , max=1.0 )
        return images
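# Usage sketch (assumes the `invisible-watermark` package providing `imwatermark`
# is installed, and that `images` is a float tensor in [-1, 1] of shape
# (batch, 3, H, W) with H, W >= 256):
#   watermarker = StableDiffusionXLWatermarker()
#   watermarked = watermarker.apply_watermark(images)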
| 429
|
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
_lowerCamelCase : Optional[Any] = "true"
def get_basic_setup(accelerator , num_samples=82 , batch_size=16 ):
    '''simple docstring'''
    set_seed(42 )
    model = RegressionModel()
    ddp_model = deepcopy(model )
    dset = RegressionDataset(length=num_samples )
    dataloader = DataLoader(dset , batch_size=batch_size )
    model.to(accelerator.device )
    ddp_model, dataloader = accelerator.prepare(ddp_model , dataloader )
    return model, ddp_model, dataloader
def get_dataloader(accelerator: Accelerator , use_longest=False ):
    '''simple docstring'''
    tokenizer = AutoTokenizer.from_pretrained("""hf-internal-testing/mrpc-bert-base-cased""" )
    dataset = load_dataset("""glue""" , """mrpc""" , split="""validation""" )
    def tokenize_function(examples ):
        outputs = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=True , max_length=None )
        return outputs
    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function , batched=True , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
    tokenized_datasets = tokenized_datasets.rename_column("""label""" , """labels""" )
    def collate_fn(examples ):
        if use_longest:
            return tokenizer.pad(examples , padding="""longest""" , return_tensors="""pt""" )
        return tokenizer.pad(examples , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
    return DataLoader(tokenized_datasets , shuffle=False , collate_fn=collate_fn , batch_size=16 )
def get_mrpc_setup(dispatch_batches , split_batches ):
    '''simple docstring'''
    accelerator = Accelerator(dispatch_batches=dispatch_batches , split_batches=split_batches )
    dataloader = get_dataloader(accelerator , not dispatch_batches )
    model = AutoModelForSequenceClassification.from_pretrained(
        """hf-internal-testing/mrpc-bert-base-cased""" , return_dict=True )
    ddp_model, ddp_dataloader = accelerator.prepare(model , dataloader )
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model , dataloader , accelerator ):
    '''simple docstring'''
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input )
        logit, target = accelerator.gather_for_metrics((logit, target) )
        logits_and_targets.append((logit, target) )
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit )
        targs.append(targ )
    logits, targs = torch.cat(logits ), torch.cat(targs )
    return logits, targs
def test_torch_metrics(accelerator: Accelerator , num_samples=82 , dispatch_batches=False , split_batches=False , batch_size=16 ):
    '''simple docstring'''
    model, ddp_model, dataloader = get_basic_setup(accelerator , num_samples , batch_size )
    logits, targs = generate_predictions(ddp_model , dataloader , accelerator )
    assert (
        len(logits ) == num_samples
    ), F"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits )}"
def test_mrpc(dispatch_batches: bool = False , split_batches: bool = False ):
    '''simple docstring'''
    metric = evaluate.load("""glue""" , """mrpc""" )
    setup, accelerator = get_mrpc_setup(dispatch_batches , split_batches )
    # First do baseline
    model, dataloader, device = setup["""no"""]
    model.to(device )
    model.eval()
    for batch in dataloader:
        batch.to(device )
        with torch.inference_mode():
            outputs = model(**batch )
        preds = outputs.logits.argmax(dim=-1 )
        metric.add_batch(predictions=preds , references=batch["""labels"""] )
    baseline = metric.compute()
    # Then do distributed
    model, dataloader, device = setup["""ddp"""]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch )
        preds = outputs.logits.argmax(dim=-1 )
        references = batch["""labels"""]
        preds, references = accelerator.gather_for_metrics((preds, references) )
        metric.add_batch(predictions=preds , references=references )
    distributed = metric.compute()
    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key] , distributed[key] ), F"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
def main():
    '''simple docstring'''
    accelerator = Accelerator(split_batches=False , dispatch_batches=False )
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be run on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("""**Testing gather_for_metrics**""" )
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(F"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`" )
                test_mrpc(dispatch_batches , split_batches )
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("""**Test torch metrics**""" )
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches , dispatch_batches=dispatch_batches )
            if accelerator.is_local_main_process:
                print(F"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99" )
            test_torch_metrics(accelerator , 99 )
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("""**Test last batch is not dropped when perfectly divisible**""" )
    accelerator = Accelerator()
    test_torch_metrics(accelerator , 512 )
    accelerator.state._reset_state()
def _UpperCAmelCase (UpperCamelCase_ : List[Any] ):
'''simple docstring'''
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainer's args.
#
# It then prints the report twice: once in github format, with all the information that needs to be
# shared with others, and a second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# As the first dimension has 2 options and the second has 3 in our example, this will run the trainer 6
# times, adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
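#
# As an illustrative sketch (plain Python; the variable names here are
# invented, not part of this script), the expansion is just itertools.product
# over the per-dimension option lists:
#
#   import itertools
#   dims = [["--tf32 0", "--tf32 1"], ["--fp16 0", "--fp16 1", "--bf16 1"]]
#   variations = [" ".join(combo) for combo in itertools.product(*dims)]
#   # -> 6 entries, from "--tf32 0 --fp16 0" to "--tf32 1 --bf16 1"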
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 times using --repeat-times 3, and it will report the averaged results.
#
# By default it'll use the lowest result as the baseline to treat as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
__lowerCAmelCase = float("nan")
class __SCREAMING_SNAKE_CASE :
def __init__( self : Optional[Any] , __UpperCamelCase : Union[str, Any] ):
_UpperCAmelCase = sys.stdout
_UpperCAmelCase = open(__UpperCamelCase , "a" )
def __getattr__( self : List[str] , __UpperCamelCase : str ):
return getattr(self.stdout , __UpperCamelCase )
def UpperCAmelCase__ ( self : List[Any] , __UpperCamelCase : List[Any] ):
self.stdout.write(__UpperCamelCase )
# strip tqdm codes
self.file.write(re.sub(r"^.*\r" , "" , __UpperCamelCase , 0 , re.M ) )
def __lowerCamelCase ( _lowerCAmelCase=80 , _lowerCAmelCase=False ) -> List[Any]:
_UpperCAmelCase = []
# deal with critical env vars
_UpperCAmelCase = ["CUDA_VISIBLE_DEVICES"]
for key in env_keys:
_UpperCAmelCase = os.environ.get(_lowerCAmelCase , _lowerCAmelCase )
if val is not None:
cmd.append(F'''{key}={val}''' )
# python executable (not always needed if the script is executable)
_UpperCAmelCase = sys.executable if full_python_path else sys.executable.split("/" )[-1]
cmd.append(_lowerCAmelCase )
# now the normal args
cmd += list(map(shlex.quote , sys.argv ) )
# split up into up to MAX_WIDTH lines with shell multi-line escapes
_UpperCAmelCase = []
_UpperCAmelCase = ""
while len(_lowerCAmelCase ) > 0:
current_line += F'''{cmd.pop(0 )} '''
if len(_lowerCAmelCase ) == 0 or len(_lowerCAmelCase ) + len(cmd[0] ) + 1 > max_width - 1:
lines.append(_lowerCAmelCase )
_UpperCAmelCase = ""
return "\\\n".join(_lowerCAmelCase )
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> List[str]:
# unwrap multi-line input
_UpperCAmelCase = re.sub(r"[\\\n]+" , " " , args.base_cmd )
# remove --output_dir if any and set our own
_UpperCAmelCase = re.sub("--output_dir\s+[^\s]+" , "" , args.base_cmd )
args.base_cmd += F''' --output_dir {output_dir}'''
# ensure we have --overwrite_output_dir
_UpperCAmelCase = re.sub("--overwrite_output_dir\s+" , "" , args.base_cmd )
args.base_cmd += " --overwrite_output_dir"
return [sys.executable] + shlex.split(args.base_cmd )
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[str]:
# Enable to debug everything but the run itself, to do it fast and see the progress.
# This is useful for debugging the output formatting quickly - we can remove it later once
# everybody is happy with the output
if 0:
import random
from time import sleep
sleep(0 )
return dict(
{k: random.uniform(0 , 100 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.2222_2222] )} , )
_UpperCAmelCase = subprocess.run(_lowerCAmelCase , capture_output=_lowerCAmelCase , text=_lowerCAmelCase )
if verbose:
print("STDOUT" , result.stdout )
print("STDERR" , result.stderr )
# save the streams
_UpperCAmelCase = variation.replace(" " , "-" )
with open(Path(_lowerCAmelCase ) / F'''log.{prefix}.stdout.txt''' , "w" ) as f:
f.write(result.stdout )
with open(Path(_lowerCAmelCase ) / F'''log.{prefix}.stderr.txt''' , "w" ) as f:
f.write(result.stderr )
if result.returncode != 0:
if verbose:
print("failed" )
return {target_metric_key: nan}
with io.open(F'''{output_dir}/all_results.json''' , "r" , encoding="utf-8" ) as f:
_UpperCAmelCase = json.load(_lowerCAmelCase )
# filter out just the keys we want
return {k: v for k, v in metrics.items() if k in metric_keys}
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ) -> Optional[Any]:
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = F'''{id}: {variation:<{longest_variation_len}}'''
_UpperCAmelCase = F'''{preamble}: '''
_UpperCAmelCase = set(report_metric_keys + [target_metric_key] )
for i in tqdm(range(_lowerCAmelCase ) , desc=_lowerCAmelCase , leave=_lowerCAmelCase ):
_UpperCAmelCase = process_run_single(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
_UpperCAmelCase = single_run_metrics[target_metric_key]
if not math.isnan(_lowerCAmelCase ):
metrics.append(_lowerCAmelCase )
results.append(_lowerCAmelCase )
outcome += "✓"
else:
outcome += "✘"
_UpperCAmelCase = F'''\33[2K\r{outcome}'''
if len(_lowerCAmelCase ) > 0:
_UpperCAmelCase = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()}
_UpperCAmelCase = round(mean_metrics[target_metric_key] , 2 )
_UpperCAmelCase = F'''{outcome} {mean_target}'''
if len(_lowerCAmelCase ) > 1:
results_str += F''' {tuple(round(x , 2 ) for x in results )}'''
print(_lowerCAmelCase )
_UpperCAmelCase = variation
return mean_metrics
else:
print(_lowerCAmelCase )
return {variation_key: variation, target_metric_key: nan}
def __lowerCamelCase ( ) -> Union[str, Any]:
_UpperCAmelCase = torch.cuda.get_device_properties(torch.device("cuda" ) )
return F'''
Datetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )}
Software:
transformers: {transformers.__version__}
torch : {torch.__version__}
cuda : {torch.version.cuda}
python : {platform.python_version()}
Hardware:
{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB
'''
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Union[str, Any]:
_UpperCAmelCase = pd.DataFrame(_lowerCAmelCase )
_UpperCAmelCase = "variation"
_UpperCAmelCase = "diff_%"
_UpperCAmelCase = nan
if base_variation is not None and len(df[df[variation_key] == base_variation] ):
# this may still return nan
_UpperCAmelCase = df.loc[df[variation_key] == base_variation][target_metric_key].item()
if math.isnan(_lowerCAmelCase ):
# as a fallback, use the minimal value as the sentinel
_UpperCAmelCase = df.loc[df[target_metric_key] != nan][target_metric_key].min()
# create diff column if possible
if not math.isnan(_lowerCAmelCase ):
_UpperCAmelCase = df.apply(
lambda r : round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value )
if not math.isnan(r[target_metric_key] )
else 0 , axis="columns" , )
# re-order columns
_UpperCAmelCase = [variation_key, target_metric_key, diff_key, *report_metric_keys]
_UpperCAmelCase = df.reindex(_lowerCAmelCase , axis="columns" ) # reorder cols
# capitalize
_UpperCAmelCase = df.rename(str.capitalize , axis="columns" )
# make the cols as narrow as possible
_UpperCAmelCase = df.rename(lambda c : c.replace("_" , "<br>" ) , axis="columns" )
_UpperCAmelCase = df.rename(lambda c : c.replace("_" , "\n" ) , axis="columns" )
_UpperCAmelCase = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results:", df_github.to_markdown(index=_lowerCAmelCase , floatfmt=".2f" )]
report += ["```"]
report += ["*** Setup:", get_versions()]
report += ["*** The benchmark command line was:", get_original_command()]
report += ["```"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results (console):", df_console.to_markdown(index=_lowerCAmelCase , floatfmt=".2f" )]
print("\n\n".join(_lowerCAmelCase ) )
def __lowerCamelCase ( ) -> List[str]:
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument(
"--base-cmd" , default=_lowerCAmelCase , type=_lowerCAmelCase , required=_lowerCAmelCase , help="Base cmd" , )
parser.add_argument(
"--variations" , default=_lowerCAmelCase , type=_lowerCAmelCase , nargs="+" , required=_lowerCAmelCase , help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'" , )
parser.add_argument(
"--base-variation" , default=_lowerCAmelCase , type=_lowerCAmelCase , help="Baseline variation to compare to. if None the minimal target value will be used to compare against" , )
parser.add_argument(
"--target-metric-key" , default=_lowerCAmelCase , type=_lowerCAmelCase , required=_lowerCAmelCase , help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second" , )
parser.add_argument(
"--report-metric-keys" , default="" , type=_lowerCAmelCase , help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples" , )
parser.add_argument(
"--repeat-times" , default=1 , type=_lowerCAmelCase , help="How many times to re-run each variation - an average will be reported" , )
parser.add_argument(
"--output_dir" , default="output_benchmark" , type=_lowerCAmelCase , help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked" , )
parser.add_argument(
"--verbose" , default=_lowerCAmelCase , action="store_true" , help="Whether to show the outputs of each run or just the benchmark progress" , )
_UpperCAmelCase = parser.parse_args()
_UpperCAmelCase = args.output_dir
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
_UpperCAmelCase = get_base_command(_lowerCAmelCase , _lowerCAmelCase )
# split each dimension into its --foo variations
_UpperCAmelCase = [list(map(str.strip , re.split(r"\|" , x ) ) ) for x in args.variations]
# build a cartesian product of dimensions and convert those back into cmd-line arg strings,
# while stripping white space for inputs that were empty
_UpperCAmelCase = list(map(str.strip , map(" ".join , itertools.product(*_lowerCAmelCase ) ) ) )
_UpperCAmelCase = max(len(x ) for x in variations )
# split wanted keys
_UpperCAmelCase = args.report_metric_keys.split()
# capture prints into a log file for convenience
_UpperCAmelCase = F'''benchmark-report-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S" )}.txt'''
print(F'''\nNote: each run\'s output is also logged under {output_dir}/log.*.std*.txt''' )
print(F'''and this script\'s output is also piped into {report_fn}''' )
_UpperCAmelCase = Tee(_lowerCAmelCase )
print(F'''\n*** Running {len(_lowerCAmelCase )} benchmarks:''' )
print(F'''Base command: {" ".join(_lowerCAmelCase )}''' )
_UpperCAmelCase = "variation"
_UpperCAmelCase = []
for id, variation in enumerate(tqdm(_lowerCAmelCase , desc="Total completion: " , leave=_lowerCAmelCase ) ):
_UpperCAmelCase = base_cmd + variation.split()
results.append(
process_run(
id + 1 , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , args.target_metric_key , _lowerCAmelCase , args.repeat_times , _lowerCAmelCase , args.verbose , ) )
process_results(_lowerCAmelCase , args.target_metric_key , _lowerCAmelCase , args.base_variation , _lowerCAmelCase )
if __name__ == "__main__":
main()
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def __lowerCamelCase ( _lowerCAmelCase ) -> Any:
_UpperCAmelCase = filter(lambda p : p.requires_grad , model.parameters() )
_UpperCAmelCase = sum([np.prod(p.size() ) for p in model_parameters] )
return params
__lowerCAmelCase = logging.getLogger(__name__)
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]:
if metric == "rouge2":
_UpperCAmelCase = "{val_avg_rouge2:.4f}-{step_count}"
elif metric == "bleu":
_UpperCAmelCase = "{val_avg_bleu:.4f}-{step_count}"
elif metric == "em":
_UpperCAmelCase = "{val_avg_em:.4f}-{step_count}"
elif metric == "loss":
_UpperCAmelCase = "{val_avg_loss:.4f}-{step_count}"
else:
raise NotImplementedError(
F'''seq2seq callbacks only support rouge2, bleu, em and loss, got {metric}. You can make your own by adding to this'''
" function." )
_UpperCAmelCase = ModelCheckpoint(
dirpath=_lowerCAmelCase , filename=_lowerCAmelCase , monitor=F'''val_{metric}''' , mode="max" , save_top_k=1 , every_n_epochs=1 , )
return checkpoint_callback
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> Optional[int]:
return EarlyStopping(
monitor=F'''val_{metric}''' , mode="min" if "loss" in metric else "max" , patience=_lowerCAmelCase , verbose=_lowerCAmelCase , )
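# Illustrative usage of the two factory functions above (the snake_case names
# and argument spellings are assumptions inferred from context; this file's
# definitions are obfuscated):
#
#   ckpt_cb = get_checkpoint_callback("checkpoints/", "rouge2")
#   es_cb = get_early_stopping_callback("rouge2", 3)
#   trainer = pl.Trainer(callbacks=[ckpt_cb, es_cb])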
class __SCREAMING_SNAKE_CASE ( pl.Callback):
def UpperCAmelCase__ ( self : Tuple , __UpperCamelCase : Optional[Any] , __UpperCamelCase : int ):
_UpperCAmelCase = {F'''lr_group_{i}''': param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(__UpperCamelCase )
@rank_zero_only
def UpperCAmelCase__ ( self : Optional[Any] , __UpperCamelCase : pl.Trainer , __UpperCamelCase : pl.LightningModule , __UpperCamelCase : str , __UpperCamelCase : Optional[int]=True ):
logger.info(F'''***** {type_path} results at step {trainer.global_step:05d} *****''' )
_UpperCAmelCase = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]} )
# Log results
_UpperCAmelCase = Path(pl_module.hparams.output_dir )
if type_path == "test":
_UpperCAmelCase = od / "test_results.txt"
_UpperCAmelCase = od / "test_generations.txt"
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
_UpperCAmelCase = od / F'''{type_path}_results/{trainer.global_step:05d}.txt'''
_UpperCAmelCase = od / F'''{type_path}_generations/{trainer.global_step:05d}.txt'''
results_file.parent.mkdir(exist_ok=__UpperCamelCase )
generations_file.parent.mkdir(exist_ok=__UpperCamelCase )
with open(__UpperCamelCase , "a+" ) as writer:
for key in sorted(__UpperCamelCase ):
if key in ["log", "progress_bar", "preds"]:
continue
_UpperCAmelCase = metrics[key]
if isinstance(__UpperCamelCase , torch.Tensor ):
_UpperCAmelCase = val.item()
_UpperCAmelCase = F'''{key}: {val:.6f}\n'''
writer.write(__UpperCamelCase )
if not save_generations:
return
if "preds" in metrics:
_UpperCAmelCase = "\n".join(metrics["preds"] )
generations_file.open("w+" ).write(__UpperCamelCase )
@rank_zero_only
def UpperCAmelCase__ ( self : Tuple , __UpperCamelCase : Dict , __UpperCamelCase : str ):
try:
_UpperCAmelCase = pl_module.model.model.num_parameters()
except AttributeError:
_UpperCAmelCase = pl_module.model.num_parameters()
_UpperCAmelCase = count_trainable_parameters(__UpperCamelCase )
# mp stands for million parameters
trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6} )
@rank_zero_only
def UpperCAmelCase__ ( self : Tuple , __UpperCamelCase : pl.Trainer , __UpperCamelCase : pl.LightningModule ):
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(__UpperCamelCase , __UpperCamelCase , "test" )
@rank_zero_only
def UpperCAmelCase__ ( self : str , __UpperCamelCase : pl.Trainer , __UpperCamelCase : List[str] ):
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE( A__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase__ = DDIMPipeline
lowerCamelCase__ = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
lowerCamelCase__ = PipelineTesterMixin.required_optional_params - {
"""num_images_per_prompt""",
"""latents""",
"""callback""",
"""callback_steps""",
}
lowerCamelCase__ = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
lowerCamelCase__ = False
def A ( self : List[str] ) -> List[str]:
torch.manual_seed(0 )
UpperCAmelCase : Tuple = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
UpperCAmelCase : str = DDIMScheduler()
UpperCAmelCase : Union[str, Any] = {'''unet''': unet, '''scheduler''': scheduler}
return components
def A ( self : List[str] , __snake_case : Union[str, Any] , __snake_case : List[Any]=0 ) -> List[str]:
if str(__snake_case ).startswith('''mps''' ):
UpperCAmelCase : str = torch.manual_seed(__snake_case )
else:
UpperCAmelCase : Dict = torch.Generator(device=__snake_case ).manual_seed(__snake_case )
UpperCAmelCase : List[Any] = {
'''batch_size''': 1,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def A ( self : str ) -> Any:
UpperCAmelCase : Dict = '''cpu'''
UpperCAmelCase : Union[str, Any] = self.get_dummy_components()
UpperCAmelCase : Union[str, Any] = self.pipeline_class(**__snake_case )
pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
UpperCAmelCase : Optional[Any] = self.get_dummy_inputs(__snake_case )
UpperCAmelCase : int = pipe(**__snake_case ).images
UpperCAmelCase : List[str] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 32, 32, 3) )
UpperCAmelCase : Optional[Any] = np.array(
[1.0_00E00, 5.7_17E-01, 4.7_17E-01, 1.0_00E00, 0.0_00E00, 1.0_00E00, 3.0_00E-04, 0.0_00E00, 9.0_00E-04] )
UpperCAmelCase : Union[str, Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__snake_case , 1E-3 )
def A ( self : Optional[Any] ) -> Union[str, Any]:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def A ( self : List[str] ) -> Optional[int]:
super().test_save_load_local(expected_max_difference=3E-3 )
def A ( self : Tuple ) -> Dict:
super().test_save_load_optional_components(expected_max_difference=3E-3 )
def A ( self : Optional[int] ) -> str:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE( unittest.TestCase ):
"""simple docstring"""
def A ( self : Dict ) -> List[str]:
UpperCAmelCase : Optional[int] = '''google/ddpm-cifar10-32'''
UpperCAmelCase : Dict = UNetaDModel.from_pretrained(__snake_case )
UpperCAmelCase : str = DDIMScheduler()
UpperCAmelCase : Optional[Any] = DDIMPipeline(unet=__snake_case , scheduler=__snake_case )
ddim.to(__snake_case )
ddim.set_progress_bar_config(disable=__snake_case )
UpperCAmelCase : Tuple = torch.manual_seed(0 )
UpperCAmelCase : Tuple = ddim(generator=__snake_case , eta=0.0 , output_type='''numpy''' ).images
UpperCAmelCase : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase : Union[str, Any] = np.array([0.17_23, 0.16_17, 0.16_00, 0.16_26, 0.14_97, 0.15_13, 0.15_05, 0.14_42, 0.14_53] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def A ( self : Dict ) -> List[str]:
UpperCAmelCase : Any = '''google/ddpm-ema-bedroom-256'''
UpperCAmelCase : Optional[int] = UNetaDModel.from_pretrained(__snake_case )
UpperCAmelCase : Any = DDIMScheduler.from_pretrained(__snake_case )
UpperCAmelCase : str = DDIMPipeline(unet=__snake_case , scheduler=__snake_case )
ddpm.to(__snake_case )
ddpm.set_progress_bar_config(disable=__snake_case )
UpperCAmelCase : List[str] = torch.manual_seed(0 )
UpperCAmelCase : Tuple = ddpm(generator=__snake_case , output_type='''numpy''' ).images
UpperCAmelCase : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
UpperCAmelCase : str = np.array([0.00_60, 0.02_01, 0.03_44, 0.00_24, 0.00_18, 0.00_02, 0.00_22, 0.00_00, 0.00_69] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCamelCase__: str = logging.get_logger(__name__)
UpperCamelCase__: Optional[int] = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all BART models at https://huggingface.co/models?filter=bart
UpperCamelCase__: Dict = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
}
UpperCamelCase__: Dict = {
"facebook/bart-base": 1024,
"facebook/bart-large": 1024,
"facebook/bart-large-mnli": 1024,
"facebook/bart-large-cnn": 1024,
"facebook/bart-large-xsum": 1024,
"yjernite/bart_eli5": 1024,
}
@lru_cache()
def snake_case_ ( ) -> str:
UpperCAmelCase : Tuple = (
list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
)
UpperCAmelCase : Any = bs[:]
UpperCAmelCase : Optional[int] = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_lowerCAmelCase )
cs.append(2**8 + n )
n += 1
UpperCAmelCase : List[Any] = [chr(_lowerCAmelCase ) for n in cs]
return dict(zip(_lowerCAmelCase , _lowerCAmelCase ) )
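# A short sanity check (assuming this is the standard GPT-2/BART
# bytes_to_unicode table): every byte 0-255 maps to a printable unicode
# character; the space byte (32) is the 33rd byte missing from the printable
# ranges, so it maps to chr(256 + 32):
#
#   table = bytes_to_unicode()
#   assert table[ord(" ")] == "Ġ"  # leading spaces appear as 'Ġ' in the vocab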
def snake_case_ ( _lowerCAmelCase : Any ) -> Optional[Any]:
UpperCAmelCase : Optional[int] = set()
UpperCAmelCase : Dict = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
UpperCAmelCase : List[str] = char
return pairs
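# For instance (a hypothetical call; the helper's public name is assumed to
# be get_pairs, as in the upstream tokenizer):
#
#   get_pairs(("h", "e", "l", "l", "o"))
#   # -> {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}
#
# i.e. the set of adjacent symbol pairs that the BPE loop below ranks and merges.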
class SCREAMING_SNAKE_CASE( A__ ):
"""simple docstring"""
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ = ["""input_ids""", """attention_mask"""]
def __init__( self : Tuple , __snake_case : int , __snake_case : Dict , __snake_case : int="replace" , __snake_case : str="<s>" , __snake_case : List[Any]="</s>" , __snake_case : Optional[int]="</s>" , __snake_case : List[Any]="<s>" , __snake_case : Union[str, Any]="<unk>" , __snake_case : Optional[Any]="<pad>" , __snake_case : List[Any]="<mask>" , __snake_case : int=False , **__snake_case : Any , ) -> str:
UpperCAmelCase : Optional[Any] = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else bos_token
UpperCAmelCase : Optional[int] = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else eos_token
UpperCAmelCase : Dict = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else sep_token
UpperCAmelCase : Optional[int] = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else cls_token
UpperCAmelCase : Union[str, Any] = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else unk_token
UpperCAmelCase : Tuple = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
UpperCAmelCase : Optional[Any] = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else mask_token
super().__init__(
errors=__snake_case , bos_token=__snake_case , eos_token=__snake_case , unk_token=__snake_case , sep_token=__snake_case , cls_token=__snake_case , pad_token=__snake_case , mask_token=__snake_case , add_prefix_space=__snake_case , **__snake_case , )
with open(__snake_case , encoding='''utf-8''' ) as vocab_handle:
UpperCAmelCase : Union[str, Any] = json.load(__snake_case )
UpperCAmelCase : str = {v: k for k, v in self.encoder.items()}
UpperCAmelCase : Any = errors # how to handle errors in decoding
UpperCAmelCase : List[Any] = bytes_to_unicode()
UpperCAmelCase : int = {v: k for k, v in self.byte_encoder.items()}
with open(__snake_case , encoding='''utf-8''' ) as merges_handle:
UpperCAmelCase : Union[str, Any] = merges_handle.read().split('''\n''' )[1:-1]
UpperCAmelCase : List[Any] = [tuple(merge.split() ) for merge in bpe_merges]
UpperCAmelCase : Optional[Any] = dict(zip(__snake_case , range(len(__snake_case ) ) ) )
UpperCAmelCase : Union[str, Any] = {}
UpperCAmelCase : List[Any] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
UpperCAmelCase : Union[str, Any] = re.compile(r'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
def A ( self : Tuple ) -> Dict:
return len(self.encoder )
def A ( self : Any ) -> Any:
return dict(self.encoder , **self.added_tokens_encoder )
def A ( self : Any , __snake_case : List[str] ) -> Optional[Any]:
if token in self.cache:
return self.cache[token]
UpperCAmelCase : Optional[int] = tuple(__snake_case )
UpperCAmelCase : List[str] = get_pairs(__snake_case )
if not pairs:
return token
while True:
UpperCAmelCase : int = min(__snake_case , key=lambda __snake_case : self.bpe_ranks.get(__snake_case , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
UpperCAmelCase , UpperCAmelCase : int = bigram
UpperCAmelCase : Any = []
UpperCAmelCase : List[str] = 0
while i < len(__snake_case ):
try:
UpperCAmelCase : int = word.index(__snake_case , __snake_case )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
UpperCAmelCase : Optional[int] = j
if word[i] == first and i < len(__snake_case ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
UpperCAmelCase : Optional[int] = tuple(__snake_case )
UpperCAmelCase : Optional[Any] = new_word
if len(__snake_case ) == 1:
break
else:
UpperCAmelCase : List[str] = get_pairs(__snake_case )
UpperCAmelCase : str = ''' '''.join(__snake_case )
UpperCAmelCase : Union[str, Any] = word
return word
def A ( self : List[Any] , __snake_case : Optional[Any] ) -> Optional[int]:
UpperCAmelCase : int = []
for token in re.findall(self.pat , __snake_case ):
UpperCAmelCase : Dict = ''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__snake_case ).split(''' ''' ) )
return bpe_tokens
def A ( self : Any , __snake_case : List[Any] ) -> List[str]:
return self.encoder.get(__snake_case , self.encoder.get(self.unk_token ) )
def A ( self : Tuple , __snake_case : Any ) -> Any:
return self.decoder.get(__snake_case )
def A ( self : List[str] , __snake_case : List[str] ) -> Dict:
UpperCAmelCase : Tuple = ''''''.join(__snake_case )
UpperCAmelCase : Union[str, Any] = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def A ( self : Dict , __snake_case : str , __snake_case : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__snake_case ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCAmelCase : Any = os.path.join(
__snake_case , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCAmelCase : Dict = os.path.join(
__snake_case , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(__snake_case , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__snake_case , ensure_ascii=__snake_case ) + '''\n''' )
UpperCAmelCase : List[str] = 0
with open(__snake_case , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
''' Please check that the tokenizer is not corrupted!''' )
UpperCAmelCase : Dict = token_index
writer.write(''' '''.join(__snake_case ) + '''\n''' )
index += 1
return vocab_file, merge_file
def A ( self : Tuple , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase : List[str] = [self.cls_token_id]
UpperCAmelCase : Optional[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
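# Illustrative result of the method above (standard BART/RoBERTa convention;
# `tokenizer` and the id lists are placeholders, not defined in this file):
#
#   tokenizer.build_inputs_with_special_tokens(ids_a, ids_b)
#   # -> [<s>] + ids_a + [</s>, </s>] + ids_b + [</s>]
#
# Note the doubled separator between the two sequences, matching the
# cls + A + sep + sep + B + sep concatenation.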
def A ( self : Tuple , __snake_case : List[int] , __snake_case : Optional[List[int]] = None , __snake_case : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__snake_case , token_ids_a=__snake_case , already_has_special_tokens=__snake_case )
if token_ids_a is None:
return [1] + ([0] * len(__snake_case )) + [1]
return [1] + ([0] * len(__snake_case )) + [1, 1] + ([0] * len(__snake_case )) + [1]
def A ( self : Dict , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ) -> List[int]:
UpperCAmelCase : Union[str, Any] = [self.sep_token_id]
UpperCAmelCase : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def A ( self : Tuple , __snake_case : Optional[Any] , __snake_case : Tuple=False , **__snake_case : Optional[Any] ) -> Any:
UpperCAmelCase : Optional[int] = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__snake_case ) > 0 and not text[0].isspace()):
UpperCAmelCase : List[Any] = ''' ''' + text
return (text, kwargs)
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__lowercase :Optional[int] = logging.get_logger(__name__)
__lowercase :str = {
"microsoft/conditional-detr-resnet-50": (
"https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
),
}
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = "conditional_detr"
snake_case_ = ["past_key_values"]
snake_case_ = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self : Optional[int] , a : List[str]=True , a : Optional[Any]=None , a : Optional[int]=3 , a : Tuple=3_00 , a : int=6 , a : str=20_48 , a : str=8 , a : Optional[Any]=6 , a : str=20_48 , a : Dict=8 , a : Optional[int]=0.0 , a : Union[str, Any]=0.0 , a : Any=True , a : List[Any]="relu" , a : int=2_56 , a : Optional[int]=0.1 , a : Union[str, Any]=0.0 , a : List[str]=0.0 , a : Optional[int]=0.02 , a : str=1.0 , a : Optional[int]=False , a : str="sine" , a : List[str]="resnet50" , a : Tuple=True , a : List[Any]=False , a : Any=2 , a : Union[str, Any]=5 , a : int=2 , a : Dict=1 , a : Optional[Any]=1 , a : List[Any]=2 , a : Optional[int]=5 , a : List[str]=2 , a : List[Any]=0.25 , **a : Any , ) ->List[Any]:
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
SCREAMING_SNAKE_CASE__ : List[Any] = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(a , a ):
SCREAMING_SNAKE_CASE__ : Optional[int] = backbone_config.get("model_type" )
SCREAMING_SNAKE_CASE__ : Tuple = CONFIG_MAPPING[backbone_model_type]
SCREAMING_SNAKE_CASE__ : Optional[Any] = config_class.from_dict(a )
SCREAMING_SNAKE_CASE__ : Any = use_timm_backbone
SCREAMING_SNAKE_CASE__ : List[str] = backbone_config
SCREAMING_SNAKE_CASE__ : Dict = num_channels
SCREAMING_SNAKE_CASE__ : Optional[Any] = num_queries
SCREAMING_SNAKE_CASE__ : Optional[Any] = d_model
SCREAMING_SNAKE_CASE__ : Optional[Any] = encoder_ffn_dim
SCREAMING_SNAKE_CASE__ : Dict = encoder_layers
SCREAMING_SNAKE_CASE__ : Any = encoder_attention_heads
SCREAMING_SNAKE_CASE__ : Dict = decoder_ffn_dim
SCREAMING_SNAKE_CASE__ : int = decoder_layers
SCREAMING_SNAKE_CASE__ : List[str] = decoder_attention_heads
SCREAMING_SNAKE_CASE__ : Optional[int] = dropout
SCREAMING_SNAKE_CASE__ : Any = attention_dropout
SCREAMING_SNAKE_CASE__ : Any = activation_dropout
SCREAMING_SNAKE_CASE__ : Optional[Any] = activation_function
SCREAMING_SNAKE_CASE__ : Union[str, Any] = init_std
SCREAMING_SNAKE_CASE__ : Optional[Any] = init_xavier_std
SCREAMING_SNAKE_CASE__ : Tuple = encoder_layerdrop
SCREAMING_SNAKE_CASE__ : List[str] = decoder_layerdrop
SCREAMING_SNAKE_CASE__ : Optional[Any] = encoder_layers
SCREAMING_SNAKE_CASE__ : int = auxiliary_loss
SCREAMING_SNAKE_CASE__ : List[str] = position_embedding_type
SCREAMING_SNAKE_CASE__ : int = backbone
SCREAMING_SNAKE_CASE__ : Tuple = use_pretrained_backbone
SCREAMING_SNAKE_CASE__ : Optional[Any] = dilation
# Hungarian matcher
SCREAMING_SNAKE_CASE__ : Union[str, Any] = class_cost
SCREAMING_SNAKE_CASE__ : int = bbox_cost
SCREAMING_SNAKE_CASE__ : int = giou_cost
# Loss coefficients
SCREAMING_SNAKE_CASE__ : Dict = mask_loss_coefficient
SCREAMING_SNAKE_CASE__ : int = dice_loss_coefficient
SCREAMING_SNAKE_CASE__ : Any = cls_loss_coefficient
SCREAMING_SNAKE_CASE__ : int = bbox_loss_coefficient
SCREAMING_SNAKE_CASE__ : Optional[int] = giou_loss_coefficient
SCREAMING_SNAKE_CASE__ : Dict = focal_alpha
super().__init__(is_encoder_decoder=a , **a )
@property
def A_ ( self : List[Any] ) ->int:
return self.encoder_attention_heads
@property
def A_ ( self : int ) ->int:
return self.d_model
def A_ ( self : Dict ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__ : int = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
SCREAMING_SNAKE_CASE__ : List[str] = self.backbone_config.to_dict()
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.__class__.model_type
return output
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = version.parse("1.11" )
@property
def A_ ( self : List[str] ) ->Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def A_ ( self : Optional[int] ) ->float:
return 1E-5
@property
def A_ ( self : Any ) ->int:
return 12
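# Illustrative usage (the ConditionalDetrConfig name is inferred from the
# checkpoint URL above; this file's class names are obfuscated):
#
#   config = ConditionalDetrConfig()
#   config.hidden_size           # resolves to d_model via attribute_map
#   config.num_attention_heads   # resolves to encoder_attention_heads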
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
__lowercase :Tuple = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
__lowercase :str = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
__lowercase :List[Any] = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 
'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
"""simple docstring"""
def A_ ( self : List[Any] ) ->MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
def A_ ( self : str , a : List[List[List[str]]] , a : List[List[str]] , a : int = 1 , a : int = 4 , ) ->Dict[str, float]:
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=a , hypotheses=a , min_len=a , max_len=a )
}