"""Highest Response Ratio Next (HRRN) scheduling: a non-preemptive algorithm
that always runs the waiting process with the largest response ratio
(waiting_time + burst_time) / burst_time."""
from statistics import mean

import numpy as np


def calculate_turn_around_time(
    process_name: list, arrival_time: list, burst_time: list, no_of_process: int
) -> list:
    """Calculate the turn around time of each process under HRRN scheduling."""
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Marks each process: 0 if it has not finished yet, 1 once it has run.
    finished_process = [0] * no_of_process
    # List to hold the calculated turn around times
    turn_around_time = [0] * no_of_process

    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()

    while no_of_process > finished_process_count:
        # If the CPU is idle, jump the clock forward to the first unfinished arrival.
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]

        response_ratio = 0
        # Index of the process selected to run next
        loc = 0
        # Response ratio of the candidate currently being inspected
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
                    i
                ]
            if response_ratio < temp:
                response_ratio = temp
                loc = i

        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1
    return turn_around_time


def calculate_waiting_time(
    process_name: list, turn_around_time: list, burst_time: list, no_of_process: int
) -> list:
    """Calculate the waiting time of each process: turn around time minus burst time."""
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time


if __name__ == "__main__":
    no_of_process = 5
    process_name = ["A", "B", "C", "D", "E"]
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]
    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )
    print("Process name \tArrival time \tBurst time \tTurn around time \tWaiting time")
    for i in range(0, no_of_process):
        print(
            f"{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t"
            f"{turn_around_time[i]}\t\t\t{waiting_time[i]}"
        )
    print(f"average waiting time : {mean(waiting_time):.5f}")
    print(f"average turn around time : {mean(turn_around_time):.5f}")
"""Tokenization classes for CPM: jieba pre-tokenization on top of the XLNet
SentencePiece tokenizer."""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
    }
}


class CpmTokenizer(PreTrainedTokenizer):
    """Runs jieba pre-tokenization and XLNet-style SentencePiece tokenization."""

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # The mask token behaves like a normal word, i.e. it includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation."
            )
        self.jieba = jieba
        self.translator = str.maketrans(" \n", "\u2582\u2583")

    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # Drop the SentencePiece processor, which is not picklable.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string, re-splitting pieces that end in a digit followed by a comma."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (sub-word strings) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Builds `X <sep> <cls>` for a single sequence, or `A <sep> B <sep> <cls>` for a pair."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def _decode(self, *args, **kwargs):
        # Map the placeholder characters used during encoding back to space and newline.
        text = super()._decode(*args, **kwargs)
        text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
        return text
"""Lazy-import init for the EfficientNet model in transformers."""
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_efficientnet": [
        "EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EfficientNetConfig",
        "EfficientNetOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_efficientnet"] = ["EfficientNetImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_efficientnet"] = [
        "EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EfficientNetForImageClassification",
        "EfficientNetModel",
        "EfficientNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_efficientnet import (
        EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EfficientNetConfig,
        EfficientNetOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_efficientnet import EfficientNetImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_efficientnet import (
            EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            EfficientNetForImageClassification,
            EfficientNetModel,
            EfficientNetPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
"""Value-guided sampling pipeline (diffusers): samples trajectories from a
diffusion model and steers them with the gradient of a value function."""
import numpy as np
import torch
import tqdm

from ...models.unet_1d import UNet1DModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler


class ValueGuidedRLPipeline(DiffusionPipeline):
    def __init__(self, value_function: UNet1DModel, unet: UNet1DModel, scheduler: DDPMScheduler, env):
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        self.data = env.get_dataset()
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except:  # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except:  # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]

    def normalize(self, x_in, key):
        return (x_in - self.means[key]) / self.stds[key]

    def de_normalize(self, x_in, key):
        return x_in * self.stds[key] + self.means[key]

    def to_torch(self, x_in):
        if type(x_in) is dict:
            return {k: self.to_torch(v) for k, v in x_in.items()}
        elif torch.is_tensor(x_in):
            return x_in.to(self.unet.device)
        return torch.tensor(x_in, device=self.unet.device)

    def reset_x0(self, x_in, cond, act_dim):
        # Overwrite the conditioned timesteps (the observation part of the trajectory).
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in

    def run_diffusion(self, x, conditions, n_guide_steps, scale):
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long)
            for _ in range(n_guide_steps):
                with torch.enable_grad():
                    x.requires_grad_()

                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0, 2, 1), timesteps).sample
                    grad = torch.autograd.grad([y.sum()], [x])[0]

                    posterior_variance = self.scheduler._get_variance(i)
                    model_std = torch.exp(0.5 * posterior_variance)
                    grad = model_std * grad
                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_x0(x, conditions, self.action_dim)
            prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1)
            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"]

            # apply conditions to the trajectory (set the initial state)
            x = self.reset_x0(x, conditions, self.action_dim)
            x = self.to_torch(x)
        return x, y

    def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1):
        # normalize the observations and create batch dimension
        obs = self.normalize(obs, "observations")
        obs = obs[None].repeat(batch_size, axis=0)

        conditions = {0: self.to_torch(obs)}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)

        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        x1 = randn_tensor(shape, device=self.unet.device)
        x = self.reset_x0(x1, conditions, self.action_dim)
        x = self.to_torch(x)

        # run the diffusion process
        x, y = self.run_diffusion(x, conditions, n_guide_steps, scale)

        # sort output trajectories by value
        sorted_idx = y.argsort(0, descending=True).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions, key="actions")

        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0, batch_size)

        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
"""Lazy-import init for the Mask2Former model in transformers."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_mask2former": [
        "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Mask2FormerConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mask2former"] = [
        "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Mask2FormerForUniversalSegmentation",
        "Mask2FormerModel",
        "Mask2FormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_mask2former import Mask2FormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
def temp_input_value(min_val: int = 10, max_val: int = 1000, option: bool = True) -> int:
    """Return a temporary input value for tests: min_val if option is True, else max_val."""
    assert (
        isinstance(min_val, int)
        and isinstance(max_val, int)
        and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
    return min_val if option else max_val


def get_avg(number_1: int, number_2: int) -> int:
    """Return the whole-number midpoint of two integers."""
    return int((number_1 + number_2) / 2)


def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    """Guess `to_guess` by bisecting the interval [lower, higher], printing each probe."""
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'
    if lower > higher:
        raise ValueError("argument value for lower and higher must be(lower > higher)")
    if not lower < to_guess < higher:
        raise ValueError(
            "guess value must be within the range of lower and higher value")

    def answer(number: int) -> str:
        """Compare a probe against the target: returns "high", "low", or "same"."""
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")
    last_lowest = lower
    last_highest = higher
    last_numbers = []
    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)
        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break
    print(f"guess the number : {last_numbers[-1]}")
    print(f"details : {last_numbers!s}")
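# A quick trace of the bisection above (worked by hand, not from the original
# source): guess_the_number(0, 100, 37) probes int((0 + 100) / 2) = 50 ("high"),
# then int((0 + 50) / 2) = 25 ("low"), then int((25 + 50) / 2) = 37 ("same"),
# so it prints "guess the number : 37" and "details : [50, 25, 37]".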
def main() -> None:
    """Read the bounds and the target from stdin and run the search."""
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower, higher, guess)


if __name__ == "__main__":
    main()
"""Slow integration test for the TensorFlow MT5 model."""
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import tensorflow as tf

    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM


@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids

        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()

        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
"""Dutch national flag sort: a single-pass, in-place sort for sequences that
contain only the three values 0, 1, and 2."""
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    """Sort a sequence of 0s, 1s and 2s in place in one pass and return it."""
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contain only {colors} values"
            raise ValueError(msg)
    return sequence
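# Example (worked by hand, not from the original source):
# dutch_national_flag_sort([2, 0, 1, 0, 2, 1]) partitions in a single pass and
# returns [0, 0, 1, 1, 2, 2]; any value outside (0, 1, 2) raises ValueError.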
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
"""The Audio feature of the `datasets` library: encodes audio samples into
(bytes, path) structs and decodes them back into numpy arrays."""
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union

import numpy as np
import pyarrow as pa

from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict


if TYPE_CHECKING:
    from .features import FeatureType


# Three module-level warning flags. Their original names were stripped by the
# renaming pass in this dump, so hypothetical placeholder names are used here.
_audio_warned_a, _audio_warned_b, _audio_warned_c = False, False, False


@dataclass
class Audio:
    """Audio feature to extract audio data from an audio file."""

    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Audio", init=False, repr=False)

    def __call__(self):
        return self.pa_type

    def encode_example(self, value: Union[str, bytes, dict]) -> dict:
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith("pcm"):
                # "PCM" only has raw audio bytes
                if value.get("sampling_rate") is None:
                    # To convert raw PCM bytes to WAV bytes, the sampling rate must be known
                    raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
                if value.get("bytes"):
                    # If we already have the PCM bytes, there is no need to re-read the file (just use them!)
                    bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
                else:
                    bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767
                buffer = BytesIO(bytes())
                sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )

    def decode_example(
        self, value: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None
    ) -> dict:
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")

        path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
        if path is None and file is None:
            raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")

        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err

        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )

        if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::")[-1]
            try:
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None

            with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                array, sampling_rate = sf.read(f)
        else:
            array, sampling_rate = sf.read(file)

        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate

        return {"path": path, "array": array, "sampling_rate": sampling_rate}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """If in the decodable state, raise an error; otherwise flatten the feature into a dictionary."""
        from .features import Value

        if self.decode:
            raise ValueError("Cannot flatten a decoded Audio feature.")
        return {
            "bytes": Value("binary"),
            "path": Value("string"),
        }

    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        return array_cast(storage, self.pa_type)

    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
"""Accelerate example: fine-tuning BERT on GLUE MRPC with LocalSGD."""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Create train/eval `DataLoader`s for GLUE MRPC, tokenized with bert-base-cased."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD"
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ ='Speech2TextFeatureExtractor'
lowerCamelCase__ ='Speech2TextTokenizer'
def __init__(self , a_ , a_ ):
'''simple docstring'''
super().__init__(a_ , a_ )
__snake_case : Dict = self.feature_extractor
__snake_case : Any = False
def __call__(self , *a_ , **a_ ):
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*a_ , **a_ )
if "raw_speech" in kwargs:
warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' )
__snake_case : List[Any] = kwargs.pop('''raw_speech''' )
else:
__snake_case : List[str] = kwargs.pop('''audio''' , a_ )
__snake_case : Dict = kwargs.pop('''sampling_rate''' , a_ )
__snake_case : int = kwargs.pop('''text''' , a_ )
if len(a_ ) > 0:
__snake_case : List[Any] = args[0]
__snake_case : Union[str, Any] = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
if audio is not None:
__snake_case : List[str] = self.feature_extractor(a_ , *a_ , sampling_rate=a_ , **a_ )
if text is not None:
__snake_case : Union[str, Any] = self.tokenizer(a_ , **a_ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
__snake_case : List[str] = encodings['''input_ids''']
return inputs
def SCREAMING_SNAKE_CASE (self , *a_ , **a_ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*a_ , **a_ )
def SCREAMING_SNAKE_CASE (self , *a_ , **a_ ):
'''simple docstring'''
return self.tokenizer.decode(*a_ , **a_ )
@contextmanager
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
'''your audio inputs, or in a separate call.''' )
__snake_case : Dict = True
__snake_case : List[Any] = self.tokenizer
yield
__snake_case : Tuple = self.feature_extractor
__snake_case : Union[str, Any] = False
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = {
"""Pillow""": """Pillow<10.0.0""",
"""accelerate""": """accelerate>=0.20.3""",
"""av""": """av==9.2.0""",
"""beautifulsoup4""": """beautifulsoup4""",
"""black""": """black~=23.1""",
"""codecarbon""": """codecarbon==1.2.0""",
"""cookiecutter""": """cookiecutter==1.7.3""",
"""dataclasses""": """dataclasses""",
"""datasets""": """datasets!=2.5.0""",
"""decord""": """decord==0.6.0""",
"""deepspeed""": """deepspeed>=0.9.3""",
"""diffusers""": """diffusers""",
"""dill""": """dill<0.3.5""",
"""evaluate""": """evaluate>=0.2.0""",
"""fairscale""": """fairscale>0.3""",
"""faiss-cpu""": """faiss-cpu""",
"""fastapi""": """fastapi""",
"""filelock""": """filelock""",
"""flax""": """flax>=0.4.1,<=0.7.0""",
"""ftfy""": """ftfy""",
"""fugashi""": """fugashi>=1.0""",
"""GitPython""": """GitPython<3.1.19""",
"""hf-doc-builder""": """hf-doc-builder>=0.3.0""",
"""huggingface-hub""": """huggingface-hub>=0.14.1,<1.0""",
"""importlib_metadata""": """importlib_metadata""",
"""ipadic""": """ipadic>=1.0.0,<2.0""",
"""isort""": """isort>=5.5.4""",
"""jax""": """jax>=0.2.8,!=0.3.2,<=0.4.13""",
"""jaxlib""": """jaxlib>=0.1.65,<=0.4.13""",
"""jieba""": """jieba""",
"""kenlm""": """kenlm""",
"""keras-nlp""": """keras-nlp>=0.3.1""",
"""librosa""": """librosa""",
"""nltk""": """nltk""",
"""natten""": """natten>=0.14.6""",
"""numpy""": """numpy>=1.17""",
"""onnxconverter-common""": """onnxconverter-common""",
"""onnxruntime-tools""": """onnxruntime-tools>=1.4.2""",
"""onnxruntime""": """onnxruntime>=1.4.0""",
"""opencv-python""": """opencv-python""",
"""optuna""": """optuna""",
"""optax""": """optax>=0.0.8,<=0.1.4""",
"""packaging""": """packaging>=20.0""",
"""parameterized""": """parameterized""",
"""phonemizer""": """phonemizer""",
"""protobuf""": """protobuf""",
"""psutil""": """psutil""",
"""pyyaml""": """pyyaml>=5.1""",
"""pydantic""": """pydantic<2""",
"""pytest""": """pytest>=7.2.0""",
"""pytest-timeout""": """pytest-timeout""",
"""pytest-xdist""": """pytest-xdist""",
"""python""": """python>=3.8.0""",
"""ray[tune]""": """ray[tune]""",
"""regex""": """regex!=2019.12.17""",
"""requests""": """requests""",
"""rhoknp""": """rhoknp>=1.1.0,<1.3.1""",
"""rjieba""": """rjieba""",
"""rouge-score""": """rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1""",
"""ruff""": """ruff>=0.0.241,<=0.0.259""",
"""sacrebleu""": """sacrebleu>=1.4.12,<2.0.0""",
"""sacremoses""": """sacremoses""",
"""safetensors""": """safetensors>=0.3.1""",
"""sagemaker""": """sagemaker>=2.31.0""",
"""scikit-learn""": """scikit-learn""",
"""sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""",
"""sigopt""": """sigopt""",
"""starlette""": """starlette""",
"""sudachipy""": """sudachipy>=0.6.6""",
"""sudachidict_core""": """sudachidict_core>=20220729""",
"""tensorflow-cpu""": """tensorflow-cpu>=2.6,<2.14""",
"""tensorflow""": """tensorflow>=2.6,<2.14""",
"""tensorflow-text""": """tensorflow-text<2.14""",
"""tf2onnx""": """tf2onnx""",
"""timeout-decorator""": """timeout-decorator""",
"""timm""": """timm""",
"""tokenizers""": """tokenizers>=0.11.1,!=0.11.3,<0.14""",
"""torch""": """torch>=1.9,!=1.12.0""",
"""torchaudio""": """torchaudio""",
"""torchvision""": """torchvision""",
"""pyctcdecode""": """pyctcdecode>=0.4.0""",
"""tqdm""": """tqdm>=4.27""",
"""unidic""": """unidic>=1.0.2""",
"""unidic_lite""": """unidic_lite>=1.0.7""",
"""urllib3""": """urllib3<2.0.0""",
"""uvicorn""": """uvicorn""",
}
"""Lazy-import init for the MLuke tokenizer in transformers."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mluke import MLukeTokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Dict = {'''vocab_file''': '''vocab.txt'''}
SCREAMING_SNAKE_CASE : List[str] = {
'''vocab_file''': {
'''openbmb/cpm-ant-10b''': '''https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt''',
},
}
SCREAMING_SNAKE_CASE : str = {
'''openbmb/cpm-ant-10b''': 1_0_2_4,
}
def __UpperCAmelCase ( snake_case_ : Optional[Any] ) -> List[str]:
"""simple docstring"""
_lowerCAmelCase = collections.OrderedDict()
with open(snake_case_ , """r""" , encoding="""utf-8""" ) as reader:
_lowerCAmelCase = reader.readlines()
for index, token in enumerate(snake_case_ ):
_lowerCAmelCase = token.rstrip("""\n""" )
_lowerCAmelCase = index
return vocab
class __lowerCamelCase ( __lowercase ):
def __init__(self , lowerCamelCase , lowerCamelCase="<unk>" , lowerCamelCase=200 ):
'''simple docstring'''
_lowerCAmelCase = vocab
_lowerCAmelCase = unk_token
_lowerCAmelCase = max_input_chars_per_word
def A__ (self , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = list(lowerCamelCase )
if len(lowerCamelCase ) > self.max_input_chars_per_word:
return [self.unk_token]
_lowerCAmelCase = 0
_lowerCAmelCase = []
while start < len(lowerCamelCase ):
_lowerCAmelCase = len(lowerCamelCase )
_lowerCAmelCase = None
while start < end:
_lowerCAmelCase = """""".join(chars[start:end] )
if substr in self.vocab:
_lowerCAmelCase = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(lowerCamelCase )
_lowerCAmelCase = end
return sub_tokens
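# Illustration (hypothetical vocab, not from the original source): with a
# vocab of {"a", "b", "ab"}, WordpieceTokenizer(vocab).tokenize("aab") first
# fails on "aab" and "aa", accepts "a", then matches the remaining "ab" in one
# piece, returning ["a", "ab"].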
class CpmAntTokenizer(PreTrainedTokenizer):
    """Tokenizer for CPMAnt: jieba segmentation followed by WordPiece over the CPMAnt vocab."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False

    def __init__(
        self,
        vocab_file,
        bod_token="<d>",
        eod_token="</d>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        line_token="</n>",
        space_token="</_>",
        padding_side="left",
        **kwargs,
    ):
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token,
            eod_token=eod_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            unk_token=unk_token,
            line_token=line_token,
            space_token=space_token,
            padding_side=padding_side,
            **kwargs,
        )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]

        del self.encoder[space_token]
        del self.encoder[line_token]

        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}

        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)

    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Tokenize a string: jieba segmentation, then WordPiece on each segment."""
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        """Decode ids into a string, dropping negative ids and special tokens."""
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        """Prepend the BOS token to each sequence (and to the second sequence of a pair)."""
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
'''simple docstring'''
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class UpperCAmelCase :
"""simple docstring"""
def __init__( self : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : List[str]=13 , UpperCamelCase__ : Tuple=7 , UpperCamelCase__ : Any=True , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : str=True , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : Any=99 , UpperCamelCase__ : str=64 , UpperCamelCase__ : str=32 , UpperCamelCase__ : Tuple=5 , UpperCamelCase__ : Dict=4 , UpperCamelCase__ : Optional[int]=37 , UpperCamelCase__ : List[str]="gelu" , UpperCamelCase__ : Dict=0.1 , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : Optional[Any]=512 , UpperCamelCase__ : List[Any]=16 , UpperCamelCase__ : Any=2 , UpperCamelCase__ : List[str]=0.02 , UpperCamelCase__ : Union[str, Any]=3 , UpperCamelCase__ : Dict=4 , UpperCamelCase__ : Optional[Any]=None , ) -> str:
_UpperCamelCase =parent
_UpperCamelCase =batch_size
_UpperCamelCase =seq_length
_UpperCamelCase =is_training
_UpperCamelCase =use_input_mask
_UpperCamelCase =use_token_type_ids
_UpperCamelCase =use_labels
_UpperCamelCase =vocab_size
_UpperCamelCase =hidden_size
_UpperCamelCase =embedding_size
_UpperCamelCase =num_hidden_layers
_UpperCamelCase =num_attention_heads
_UpperCamelCase =intermediate_size
_UpperCamelCase =hidden_act
_UpperCamelCase =hidden_dropout_prob
_UpperCamelCase =attention_probs_dropout_prob
_UpperCamelCase =max_position_embeddings
_UpperCamelCase =type_vocab_size
_UpperCamelCase =type_sequence_label_size
_UpperCamelCase =initializer_range
_UpperCamelCase =num_labels
_UpperCamelCase =num_choices
_UpperCamelCase =scope
def UpperCamelCase__ ( self : List[str] ) -> Optional[int]:
_UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase =None
if self.use_input_mask:
_UpperCamelCase =random_attention_mask([self.batch_size, self.seq_length] )
_UpperCamelCase =None
if self.use_token_type_ids:
_UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCamelCase =None
_UpperCamelCase =None
_UpperCamelCase =None
if self.use_labels:
_UpperCamelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCamelCase =ids_tensor([self.batch_size] , self.num_choices )
_UpperCamelCase =self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase__ ( self : List[str] ) -> Optional[Any]:
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__A , initializer_range=self.initializer_range , )
    def create_and_check_megatron_bert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_megatron_bert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_megatron_bert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_megatron_bert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_megatron_bert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MegatronBertModel,
            MegatronBertForMaskedLM,
            MegatronBertForCausalLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MegatronBertModel,
            "fill-mask": MegatronBertForMaskedLM,
            "question-answering": MegatronBertForQuestionAnswering,
            "text-classification": MegatronBertForSequenceClassification,
            "text-generation": MegatronBertForCausalLM,
            "token-classification": MegatronBertForTokenClassification,
            "zero-shot": MegatronBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    # test_resize_embeddings = False
    test_head_masking = False
    # special case for ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_megatron_bert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase):
    @slow
    @unittest.skip("Model is not available.")
    def test_inference_no_head(self):
        directory = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
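
# Added example: a conventional entry point so the tests above can be run
# directly with `python test_modeling_megatron_bert.py` (assumes the standard
# `unittest` import at the top of this test module).
if __name__ == "__main__":
    unittest.main()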
'''simple docstring'''
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, T5EncoderModel

from diffusers import DDPMScheduler, UNet2DConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class IFPipelineTesterMixin:
    def _get_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=1,
            block_out_channels=[32, 64],
            down_block_types=["ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=3,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=True,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type="epsilon",
            variance_type="learned_range",
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
    def _get_superresolution_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=[1, 2],
            block_out_channels=[32, 64],
            down_block_types=["ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=6,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
            class_embed_type="timestep",
            mid_block_scale_factor=1.414,
            time_embedding_act_fn="gelu",
            time_embedding_dim=32,
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=True,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type="epsilon",
            variance_type="learned_range",
        )

        torch.manual_seed(0)
        image_noising_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
    def _test_save_load_optional_components(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)

        prompt = inputs["prompt"]
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        if "image" in inputs:
            image = inputs["image"]
        else:
            image = None

        if "mask_image" in inputs:
            mask_image = inputs["mask_image"]
        else:
            mask_image = None

        if "original_image" in inputs:
            original_image = inputs["original_image"]
        else:
            original_image = None

        prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt)

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image

        if mask_image is not None:
            inputs["mask_image"] = mask_image

        if original_image is not None:
            inputs["original_image"] = original_image

        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)

        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)

        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image

        if mask_image is not None:
            inputs["mask_image"] = mask_image

        if original_image is not None:
            inputs["original_image"] = original_image

        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
    def _test_save_load_local(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
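
# Added sketch of the save/reload round trip the two tests above exercise, shown
# outside the test harness. The tiny checkpoint id below is an assumption for
# illustration only; any small pipeline would behave the same way.
if __name__ == "__main__":
    from diffusers import DiffusionPipeline

    demo_pipe = DiffusionPipeline.from_pretrained("hf-internal-testing/tiny-stable-diffusion-pipe")
    with tempfile.TemporaryDirectory() as demo_tmpdir:
        demo_pipe.save_pretrained(demo_tmpdir)
        demo_pipe_reloaded = DiffusionPipeline.from_pretrained(demo_tmpdir)
        print(type(demo_pipe_reloaded).__name__)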
"""simple docstring"""
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
a_ = """hf-internal-testing/tiny-random-bert"""
a_ = os.path.join(TRANSFORMERS_CACHE, """models--hf-internal-testing--tiny-random-bert""")
a_ = """9b8c223d42b2188cb49d29af482996f9d0f3e5a6"""
class GetFromCacheTests(unittest.TestCase):
    def test_cached_file(self):
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(CACHE_DIR))
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(CACHE_DIR, subfolder)))
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", main_commit, CONFIG_NAME))
        self.assertTrue(os.path.isfile(archive_file))

        # File is cached at the same place the second time.
        new_archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        self.assertEqual(archive_file, new_archive_file)

        # Using a specific revision to test the full commit hash.
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME, revision="9b8c223")
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", FULL_COMMIT_HASH, CONFIG_NAME))

    def test_cached_file_errors(self):
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            _ = cached_file("tiny-random-bert", CONFIG_NAME)
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            _ = cached_file(RANDOM_BERT, CONFIG_NAME, revision="aaaa")
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")

    def test_non_existence_is_cached(self):
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR, ".no_exist", main_commit, "conf")))

        path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        path = cached_file(RANDOM_BERT, "conf", local_files_only=True, _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_connection_errors=False)
            self.assertIsNone(path)
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_has_file(self):
        self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only", WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", TF2_WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", FLAX_WEIGHTS_NAME))

    def test_get_file_from_repo_distant(self):
        # `get_file_from_repo` returns None if the file does not exist.
        self.assertIsNone(get_file_from_repo("bert-base-cased", "ahah.txt"))

        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            get_file_from_repo("bert-base-case", CONFIG_NAME)

        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            get_file_from_repo("bert-base-cased", CONFIG_NAME, revision="ahaha")

        resolved_file = get_file_from_repo("bert-base-cased", CONFIG_NAME)
        # The name is the cached name which is not very easy to test, so instead we load the content.
        config = json.loads(open(resolved_file, "r").read())
        self.assertEqual(config["hidden_size"], 768)

    def test_get_file_from_repo_local(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            filename = Path(tmp_dir) / "a.txt"
            filename.touch()
            self.assertEqual(get_file_from_repo(tmp_dir, "a.txt"), str(filename))

            self.assertIsNone(get_file_from_repo(tmp_dir, "b.txt"))
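
# Added sketch of the API under test: resolve a file from the same tiny repo
# used above (downloads on first call, then hits the local cache).
if __name__ == "__main__":
    resolved = cached_file(RANDOM_BERT, CONFIG_NAME)
    print(resolved)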
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
logger = logging.get_logger(__name__)


class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-ctx_encoder-single-nq-base": 512,
    "facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-question_encoder-single-nq-base": 512,
    "facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-reader-single-nq-base": 512,
    "facebook/dpr-reader-multiset-base": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer
class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
snake_case = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""])
CUSTOM_DPR_READER_DOCSTRING = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(self, questions, titles: Optional[str] = None, texts: Optional[str] = None, padding: Union[bool, str] = False, truncation: Union[bool, str] = False, max_length: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_attention_mask: Optional[bool] = None, **kwargs) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions, text_pair, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(self, reader_input: BatchEncoding, reader_output: DPRReaderOutput, num_spans: int = 16, max_answer_length: int = 64, num_spans_per_passage: int = 4) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len], end_logits=end_logits[doc_id][passage_offset:sequence_len], max_answer_length=max_answer_length, top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index], relevance_score=relevance_logits[doc_id], doc_id=doc_id, start_index=start_index, end_index=end_index, text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(self, start_logits: List[int], end_logits: List[int], max_answer_length: int, top_spans: int) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
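
# Added usage sketch for the reader tokenizer above; the checkpoint id matches
# the pretrained maps in this module. The question/title/text strings are
# illustrative.
if __name__ == "__main__":
    tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
    encoded = tokenizer(
        questions=["What is love?"],
        titles=["Haddaway"],
        texts=["'What Is Love' is a song recorded by the artist Haddaway."],
        return_tensors="pt",
    )
    print(encoded["input_ids"].shape)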
from ....utils import logging
logger = logging.get_logger(__name__)


class MMBTConfig:
    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
'''simple docstring'''
class FlowNetwork:
    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph

        self._normalize_graph(sources, sinks)
        self.vertices_count = len(graph)
        self.maximum_flow_algorithm = None

    # make only one source and one sink
    def _normalize_graph(self, sources, sinks):
        if isinstance(sources, int):
            sources = [sources]
        if isinstance(sinks, int):
            sinks = [sinks]

        if len(sources) == 0 or len(sinks) == 0:
            return

        self.source_index = sources[0]
        self.sink_index = sinks[0]

        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])

            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0

            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1
    def find_maximum_flow(self):
        if self.maximum_flow_algorithm is None:
            raise Exception("You need to set maximum flow algorithm before.")
        if self.source_index is None or self.sink_index is None:
            return 0

        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.get_maximum_flow()

    def set_maximum_flow_algorithm(self, algorithm):
        self.maximum_flow_algorithm = algorithm(self)
class FlowNetworkAlgorithmExecutor:
    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.vertices_count = flow_network.vertices_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    # You should override this method
    def _algorithm(self):
        pass
class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def get_maximum_flow(self):
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!")
        return self.maximum_flow
class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)

        self.preflow = [[0] * self.vertices_count for i in range(self.vertices_count)]
        self.heights = [0] * self.vertices_count
        self.excesses = [0] * self.vertices_count

    def _algorithm(self):
        self.heights[self.source_index] = self.vertices_count

        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.vertices_count)
            if i != self.source_index and i != self.sink_index
        ]

        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1

        self.maximum_flow = sum(self.preflow[self.source_index])

    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.vertices_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)

            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.vertices_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]

        if min_height is not None:
            self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
__A : Any = [0]
__A : int = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
__A : List[str] = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
__A : str = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
__A : Dict = flow_network.find_maximum_flow()
print(F'''maximum flow is {maximum_flow}''')
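
    # Added example: a second network with two disjoint paths, 0->1->3
    # (bottleneck 5) and 0->2->3 (bottleneck 3), so the expected maximum flow is 8.
    graph_2 = [[0, 5, 4, 0], [0, 0, 0, 6], [0, 0, 0, 3], [0, 0, 0, 0]]
    flow_network_2 = FlowNetwork(graph_2, [0], [3])
    flow_network_2.set_maximum_flow_algorithm(PushRelabelExecutor)
    print(f"maximum flow is {flow_network_2.find_maximum_flow()}")  # expected: 8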
'''simple docstring'''
def rank_of_matrix(matrix: list[list[int | float]]) -> int:
    """
    Find the rank of a matrix via Gaussian elimination.

    >>> rank_of_matrix([[1, 0], [0, 1]])
    2
    >>> rank_of_matrix([[1, 2], [2, 4]])
    1
    """
    rows = len(matrix)
    columns = len(matrix[0])
    rank = min(rows, columns)

    row = 0
    while row < rank:
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1, rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row, columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1, rows):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows):
                    matrix[i][row] = matrix[i][rank]
                # Stay on the same row after the column copy
                row -= 1
        row += 1
    return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
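
    # Added example: the third row below is the sum of the first two,
    # so the rank drops from 3 to 2.
    example = [[1, 2, 3], [4, 5, 6], [5, 7, 9]]
    print(f"rank = {rank_of_matrix(example)}")  # expected: rank = 2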
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class InstructBlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)

        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer

    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchFeature:
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")

        encoding = BatchFeature()

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")

        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)

        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
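
# Added usage sketch for the processor above; the checkpoint id and prompt are
# illustrative (the full checkpoint is large, so this is a sketch rather than a
# quick test).
if __name__ == "__main__":
    processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
    inputs = processor(text="Describe the image.", return_tensors="pt")
    print(sorted(inputs.keys()))  # input_ids, attention_mask, and the qformer_* keys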
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
    "processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Optional[int] = ["VisionTextDualEncoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Optional[int] = ["FlaxVisionTextDualEncoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Any = ["TFVisionTextDualEncoderModel"]
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
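
# Added note on the lazy-module pattern above: heavy framework code is only
# imported when an attribute is first accessed, e.g. (shown as comments since
# top-level code here would defeat the laziness):
#
#     from transformers import VisionTextDualEncoderConfig  # cheap, config only
#     from transformers import VisionTextDualEncoderModel   # pulls in torch code on access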
import unittest
from transformers.testing_utils import require_bs4
from transformers.utils import is_bs4_available

from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin

if is_bs4_available():
    from transformers import MarkupLMFeatureExtractor
class MarkupLMFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent):
        self.parent = parent

    def prepare_feat_extract_dict(self):
        return {}
def get_html_strings():
    html_string_1 = """<HTML>
<HEAD>
<TITLE>sample document</TITLE>
</HEAD>
<BODY BGCOLOR="FFFFFF">
<HR>
<a href="http://google.com">Goog</a>
<H1>This is one header</H1>
<H2>This is a another Header</H2>
<P>Travel from
<P>
<B>SFO to JFK</B>
<BR>
<B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>
<HR>
<div style="color:#0000FF">
<h3>Traveler <b> name </b> is
<p> John Doe </p>
</div>"""

    html_string_2 = """
<!DOCTYPE html>
<html>
<body>
<h1>My First Heading</h1>
<p>My first paragraph.</p>
</body>
</html>
"""

    return [html_string_1, html_string_2]
@require_bs4
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    feature_extraction_class = MarkupLMFeatureExtractor if is_bs4_available() else None

    def setUp(self):
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)

    @property
    def feat_extract_dict(self):
        return self.feature_extract_tester.prepare_feat_extract_dict()

    def test_call(self):
        feature_extractor = self.feature_extraction_class()

        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string)

        # fmt: off
        expected_nodes = [["sample document", "Goog", "This is one header", "This is a another Header", "Travel from", "SFO to JFK", "on May 2, 2015 at 2:00 pm. For details go to confirm.com", "Traveler", "name", "is", "John Doe"]]
        expected_xpaths = [["/html/head/title", "/html/body/a", "/html/body/h1", "/html/body/h2", "/html/body/p", "/html/body/p/p/b[1]", "/html/body/p/p/b[2]/i", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/b", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/p"]]
        # fmt: on

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)

        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings)

        # fmt: off
        expected_nodes = expected_nodes + [["My First Heading", "My first paragraph."]]
        expected_xpaths = expected_xpaths + [["/html/body/h1", "/html/body/p"]]
        # fmt: on

        self.assertEqual(len(encoding.nodes), 2)
        self.assertEqual(len(encoding.xpaths), 2)

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
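
# Added usage sketch: the feature extractor turns raw HTML into parallel lists
# of text nodes and their xpaths (requires bs4, like the tests above).
if __name__ == "__main__":
    fe = MarkupLMFeatureExtractor()
    enc = fe("<html><body><h1>Hello</h1><p>World</p></body></html>")
    print(enc.nodes)   # [['Hello', 'World']]
    print(enc.xpaths)  # [['/html/body/h1', '/html/body/p']]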
import tensorflow as tf
from ...tf_utils import shape_list
class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer):
    def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order

        self.out_layers = []
        self.out_projs = []
    def build(self, input_shape):
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed), initializer="zeros", trainable=True, name="cluster_weight"
            )
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,), initializer="zeros", trainable=True, name="cluster_bias"
            )

        if self.div_val == 1:
            for i in range(len(self.cutoffs)):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(
                        shape=(self.d_embed, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}",
                    )
                    self.out_projs.append(weight)
                else:
                    self.out_projs.append(None)
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(self.vocab_size,), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = self.d_embed // (self.div_val**i)
                self.out_projs.append(
                    self.add_weight(shape=(d_emb_i, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}")
                )
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(r_idx - l_idx,), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        super().build(input_shape)
    @staticmethod
    def _logit(x, W, b, proj=None):
        y = x
        if proj is not None:
            y = tf.einsum("ibd,ed->ibe", y, proj)
        return tf.einsum("ibd,nd->ibn", y, W) + b

    @staticmethod
    def _gather_logprob(logprob, target):
        lp_size = shape_list(logprob)
        r = tf.range(lp_size[0], dtype=target.dtype)
        idx = tf.stack([r, target], 1)
        return tf.gather_nd(logprob, idx)
    def call(self, hidden, target, return_mean=True, training=False):
        head_logprob = 0
        if self.n_clusters == 0:
            output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
            if target is not None:
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
            out = tf.nn.log_softmax(output, axis=-1)
        else:
            hidden_sizes = shape_list(hidden)
            out = []
            loss = tf.zeros(hidden_sizes[:2])
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    mask = (target >= l_idx) & (target < r_idx)
                    mask_idx = tf.where(mask)
                    cur_target = tf.boolean_mask(target, mask) - l_idx

                if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]

                if i == 0:
                    cur_W = tf.concat([cur_W, self.cluster_weight], 0)
                    cur_b = tf.concat([cur_b, self.cluster_bias], 0)

                    head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
                    head_logprob = tf.nn.log_softmax(head_logit)
                    out.append(head_logprob[..., : self.cutoffs[0]])
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
                else:
                    tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
                    tail_logprob = tf.nn.log_softmax(tail_logit)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i)
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    loss += tf.scatter_nd(mask_idx, -cur_logprob, shape_list(loss))
            out = tf.concat(out, axis=-1)

        if target is not None:
            if return_mean:
                loss = tf.reduce_mean(loss)
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(loss)

            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference.
            self.add_metric(loss, name=self.name, aggregation="mean" if return_mean else "")

        return out
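
# Added construction sketch (all dimensions illustrative): two cutoffs split a
# 1000-token vocabulary into a head shortlist and two tail clusters, with tail
# embeddings shrunk by div_val.
if __name__ == "__main__":
    layer = TFAdaptiveSoftmaxMask(vocab_size=1000, d_embed=32, d_proj=32, cutoffs=[250, 500], div_val=2)
    hidden = tf.zeros((4, 2, 32))  # (seq_len, batch_size, d_proj)
    target = tf.zeros((4, 2), dtype=tf.int64)
    logprob = layer(hidden, target)
    print(logprob.shape)  # (4, 2, 1000)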
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
    # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class Wav2Vec2Config(PretrainedConfig):
    """Configuration class to store the configuration of a Wav2Vec2 model."""

    model_type = "wav2vec2"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        adapter_attn_dim=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
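
# Hedged usage note (added for illustration, not part of the original file):
# with the default conv strides (5, 2, 2, 2, 2, 2, 2), inputs_to_logits_ratio
# evaluates to 5 * 2**6 = 320, i.e. one logits frame per 320 raw audio
# samples (20 ms at a 16 kHz sampling rate).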
| 500
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
logger = logging.get_logger(__name__)
class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
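
# Note (added): instantiating VideoMAEFeatureExtractor still works but emits a
# FutureWarning; new code should construct VideoMAEImageProcessor directly.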
| 500
| 1
|
__A : Dict = "Alexander Joslin"
import operator as op
from .stack import Stack
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
_A = {'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub}
_A = Stack()
_A = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(_SCREAMING_SNAKE_CASE ) )
elif i in operators:
# RULE 2
operator_stack.push(_SCREAMING_SNAKE_CASE )
elif i == ")":
# RULE 4
_A = operator_stack.peek()
operator_stack.pop()
_A = operand_stack.peek()
operand_stack.pop()
_A = operand_stack.peek()
operand_stack.pop()
_A = operators[opr](_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
operand_stack.push(_SCREAMING_SNAKE_CASE )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
__A : Any = "(5 + ((4 * 2) * (2 + 3)))"
# answer = 45
print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
| 27
|
'''simple docstring'''
import logging
from transformers.configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)


class MaskedBertConfig(PretrainedConfig):
    """A configuration replicating BertConfig with extra parameters for weight pruning/masking."""

    model_type = "masked_bert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        pruning_method="topK",
        mask_init="constant",
        mask_scale=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
| 251
| 0
|
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    """Tokenize one example and record its characters-per-token ratio."""
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"""repo_name""",
"""path""",
"""copies""",
"""size""",
"""content""",
"""license""",
"""hash""",
"""line_mean""",
"""line_max""",
"""alpha_frac""",
"""autogenerated""",
],
)
print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
| 716
|
"""simple docstring"""
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """Return the maximum of nums[left : right + 1] by divide and conquer."""
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]

    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
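    # Hedged sanity check (added for illustration): find_max agrees with the
    # built-in max() on a small mixed sample.
    sample = [3, -3, 0, 7.5, 2]
    assert find_max(sample, 0, len(sample) - 1) == max(sample)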
| 176
| 0
|
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one(i):  # picklable for multiprocessing
    return i + 1


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input_split():
    with parallel_backend("spark"):
        assert ParallelBackendConfig.backend_name == "spark"

    lst = [1, 2, 3]
    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=2)

    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=-1)


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc", [2, -1])
def test_parallel_backend_map_nested(num_proc):
    s1 = [1, 2]
    s2 = {"a": 1, "b": 2}
    s3 = {"a": [1, 2], "b": [3, 4]}
    s4 = {"a": {"1": 1}, "b": 2}
    s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {"a": 2, "b": 3}
    expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
    expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
    expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}

    with parallel_backend("spark"):
        assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
        assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
        assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
        assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
        assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
| 31
|
"""simple docstring"""
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)
class VisionEncoderDecoderConfig(PretrainedConfig):
    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuration of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}"
            )

        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True
    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
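
# Hedged usage sketch (added for illustration; the model identifiers are
# examples, not mandated by this file):
#   from transformers import ViTConfig, GPT2Config
#   config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(ViTConfig(), GPT2Config())
#   assert config.decoder.is_decoder and config.decoder.add_cross_attention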
class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})
class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        import torch

        common_inputs = OrderedDict()

        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)

        return common_inputs
class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> None:
        pass

    def get_encoder_config(self, encoder_config: PretrainedConfig) -> OnnxConfig:
        """Returns the ONNX encoder config for a `VisionEncoderDecoder` model."""
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(
        self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default"
    ) -> OnnxConfig:
        """Returns the ONNX decoder config for a `VisionEncoderDecoder` model."""
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
| 616
| 0
|
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph: dict[int, list[int]]) -> bool:
    """Check whether a graph (given as an adjacency list) is bipartite, via DFS 2-coloring."""
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v: int, c: int) -> None:
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
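# Hedged extra check (added for illustration): a triangle contains an odd
# cycle, so it is not bipartite.
triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(triangle))  # False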
| 716
|
from __future__ import annotations
Path = list[tuple[int, int]]
grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right
class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        """The heuristic here is the Manhattan distance."""
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost
class GreedyBestFirst:
    def __init__(self, start, goal):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99_999, None)

        self.open_nodes = [self.start]
        self.closed_nodes = []

        self.reached = False

    def search(self) -> Path | None:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        if not self.reached:
            return [self.start.pos]
        return None
    def get_successors(self, parent) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]

            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
if __name__ == "__main__":
_A = (0, 0)
_A = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print("------")
_A = GreedyBestFirst(init, goal)
_A = greedy_bf.search()
if path:
for pos_x, pos_y in path:
_A = 2
for elem in grid:
print(elem)
| 279
| 0
|
"""simple docstring"""
from __future__ import annotations
def __lowercase ( snake_case_ : str ,snake_case_ : list[str] | None = None ) ->List[Any]:
'''simple docstring'''
__A : Dict = word_bank or []
# create a table
__A : Any = len(A__ ) + 1
__A : Tuple = []
for _ in range(A__ ):
table.append([] )
# seed value
__A : Optional[int] = [[]] # because empty string has empty combination
# iterate through the indices
for i in range(A__ ):
# condition
if table[i] != []:
for word in word_bank:
# slice condition
if target[i : i + len(A__ )] == word:
__A : Tuple = [
[word, *way] for way in table[i]
]
# adds the word to every combination the current position holds
# now,push that combination to the table[i+len(word)]
table[i + len(A__ )] += new_combinations
# combinations are in reverse order so reverse for better output
for combination in table[len(A__ )]:
combination.reverse()
return table[len(A__ )]
if __name__ == "__main__":
print(all_construct("""jwajalapa""", ["""jwa""", """j""", """w""", """a""", """la""", """lapa"""]))
print(all_construct("""rajamati""", ["""s""", """raj""", """amat""", """raja""", """ma""", """i""", """t"""]))
print(
all_construct(
"""hexagonosaurus""",
["""h""", """ex""", """hex""", """ag""", """ago""", """ru""", """auru""", """rus""", """go""", """no""", """o""", """s"""],
)
)
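# Hedged sanity check (added for illustration): "abc" can be built exactly two
# ways from this bank, as ["a", "b", "c"] and ["ab", "c"].
assert sorted(all_construct("abc", ["a", "b", "c", "ab"])) == [["a", "b", "c"], ["ab", "c"]]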
| 177
|
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    return sum(
        number
        for number in range(1000, 100_0000)
        if number == digits_fifth_powers_sum(number)
    )
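
# Why 1_000_000 is a safe upper bound (added reasoning, not in the original):
# a 7-digit number is at least 1_000_000, yet even seven 9s give a digit
# fifth-power sum of only 7 * 9**5 = 413_343, so no larger solutions exist.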
if __name__ == "__main__":
print(solution())
| 254
| 0
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_detr_config(model_name):
    # initialize config
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50")
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-101")
    else:
        raise ValueError("Model name should include either resnet50 or resnet101")

    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)

    # set label attributes
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config, is_panoptic
def create_rename_keys(config):
    # here we list all keys to be renamed (original name on the left, our name on the right)
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.conv1.weight''', '''backbone.conv_encoder.model.embedder.embedder.convolution.weight''') )
rename_keys.append(('''backbone.0.body.bn1.weight''', '''backbone.conv_encoder.model.embedder.embedder.normalization.weight''') )
rename_keys.append(('''backbone.0.body.bn1.bias''', '''backbone.conv_encoder.model.embedder.embedder.normalization.bias''') )
rename_keys.append(('''backbone.0.body.bn1.running_mean''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_mean''') )
rename_keys.append(('''backbone.0.body.bn1.running_var''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_var''') )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight",
) )
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight",
) )
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias",
) )
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean",
) )
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var",
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight",
) )
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight",
) )
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias",
) )
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean",
) )
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var",
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
f"transformer.encoder.layers.{i}.self_attn.out_proj.weight",
f"encoder.layers.{i}.self_attn.out_proj.weight",
) )
rename_keys.append(
(f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias") )
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias") )
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias") )
rename_keys.append(
(f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append(
(f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append(
(f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias") )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
f"transformer.decoder.layers.{i}.self_attn.out_proj.weight",
f"decoder.layers.{i}.self_attn.out_proj.weight",
) )
rename_keys.append(
(f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias") )
rename_keys.append(
(
f"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight",
f"decoder.layers.{i}.encoder_attn.out_proj.weight",
) )
rename_keys.append(
(
f"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias",
f"decoder.layers.{i}.encoder_attn.out_proj.bias",
) )
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias") )
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight") )
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias") )
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias") )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
] )
return rename_keys
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    config, is_panoptic = get_detr_config(model_name)

    # load original model from torch hub
    model_name_to_original_name = {
        "detr-resnet-50": "detr_resnet50",
        "detr-resnet-101": "detr_resnet101",
    }
    logger.info(f"Converting model {model_name}...")
    detr = torch.hub.load("facebookresearch/detr", model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = "detr." + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val

    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion on an image
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    processor = DetrImageProcessor(format=format)

    encoding = processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)

    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info("Uploading PyTorch model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''detr-resnet-50''',
type=str,
choices=['''detr-resnet-50''', '''detr-resnet-101'''],
help='''Name of the DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the model to the hub or not.''')
    args = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 469
|
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """Post-process a flax checkpoint key/tensor pair so it matches the PyTorch naming."""
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

    return flax_key_tuple, flax_tensor
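
# Hedged mini-example (added for illustration): a 2-D "kernel" entry is
# renamed to "weight" and transposed, matching PyTorch's nn.Linear layout:
#   rename_base_flax_keys(("mlp", "wi", "kernel"), torch.ones(3, 4))
#   -> (("mlp", "wi", "weight"), a tensor of shape (4, 3))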
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer[-1] = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]

    return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    max_shard_size = convert_file_size_to_int(max_shard_size)

    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(dump_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")

    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(key)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default='''/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600''',
type=str,
required=False,
help='''Path to a directory containing a folder per layer. Follows the original Google format.''',
)
parser.add_argument('''--max_shard_size''', default='''10GB''', required=False, help='''Max shard size''')
parser.add_argument('''--dtype''', default='''bfloat16''', type=str, required=False, help='''dtype of the saved model''')
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted''',
type=str,
required=False,
help='''Path to the output pytorch model.''',
)
    args = parser.parse_args()
    shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
        args.pytorch_dump_folder_path,
        args.max_shard_size,
        args.dtype,
    )
def sanity_check():
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )

    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."

    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
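
# Note (added): sanity_check() above is a manual smoke test; it is not called
# by the __main__ block and must be invoked separately after conversion.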
| 469
| 1
|
from __future__ import annotations


def merge(input_list: list, low: int, mid: int, high: int) -> list:
    """Sort input_list over [low, high] in place by merging its two sorted halves."""
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list: list) -> list:
    """Bottom-up (iterative) merge sort."""
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)

    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2

    return input_list
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(iter_merge_sort(unsorted))
| 565
|
class SubArray:
    def __init__(self, arr):
        # convert all the elements of the comma-separated string to a list
        self.array = arr.split(",")

    def solve_sub_array(self):
        rear = [int(self.array[0])] * len(self.array)
        sum_value = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(
                int(self.array[i]) + sum_value[i - 1], int(self.array[i])
            )
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]


if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(("the results is:", re))
| 611
| 0
|
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class TimesformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        num_labels=10,
        initializer_range=0.02,
        attention_type="divided_space_time",
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels

        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames) * self.num_patches_per_frame + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        config = TimesformerConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
        )
        config.num_labels = self.num_labels
        return config
    def create_and_check_model(self, config, pixel_values, labels):
        model = TimesformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_video_classification(self, config, pixel_values, labels):
        model = TimesformerForVideoClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels))
        self.parent.assertEqual(result.logits.shape, expected_shape)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class TimesformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TimesformerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=TimesformerConfig, has_text_modality=False, hidden_size=37
        )

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="TimeSformer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_video_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TimesformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                seq_len = self.model_tester.seq_length
                num_frames = self.model_tester.num_frames

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            seq_length = self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
def prepare_video():
    video_file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(video_file)
    return list(video)
@require_torch
@require_vision
class TimesformerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # logits were tested with a different mean and std, so we use the same here
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400").to(
            torch_device
        )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8], return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 138
|
from __future__ import annotations
import pandas as pd
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """Calculate the waiting time of each process under shortest remaining time first."""
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    complete = 0
    increment_time = 0
    minm = 999_999_999
    short = 0
    check = False

    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True

        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1

        minm = remaining_time[short]
        if minm == 0:
            minm = 999_999_999

        if remaining_time[short] == 0:
            complete += 1
            check = False

            # Find finish time of current process
            finish_time = increment_time + 1

            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]

            if waiting_time[short] < 0:
                waiting_time[short] = 0

        # Increment time
        increment_time += 1
    return waiting_time
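
# Worked example (added for illustration): arrival times [0, 1, 2] with burst
# times [3, 1, 2] yield SRTF waiting times [1, 0, 2]: process 2 (burst 1)
# preempts process 1 at t=1, and process 3 runs last.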
def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """Calculate the turn-around time of each process."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
def calculate_average_times(waiting_time: list[int], turn_around_time: list[int], no_of_processes: int) -> None:
    """Print the average waiting and turn-around times."""
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f"Average waiting time = {total_waiting_time / no_of_processes:.5f}")
    print("Average turn around time =", total_turn_around_time / no_of_processes)
if __name__ == "__main__":
print("Enter how many process you want to analyze")
SCREAMING_SNAKE_CASE : Optional[Any] = int(input())
SCREAMING_SNAKE_CASE : List[Any] = [0] * no_of_processes
SCREAMING_SNAKE_CASE : Union[str, Any] = [0] * no_of_processes
SCREAMING_SNAKE_CASE : List[str] = list(range(1, no_of_processes + 1))
for i in range(no_of_processes):
print("Enter the arrival time and burst time for process:--" + str(i + 1))
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = map(int, input().split())
SCREAMING_SNAKE_CASE : str = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
SCREAMING_SNAKE_CASE : str = burst_time
SCREAMING_SNAKE_CASE : List[Any] = no_of_processes
SCREAMING_SNAKE_CASE : Optional[int] = waiting_time
SCREAMING_SNAKE_CASE : Optional[int] = calculate_turnaroundtime(bt, n, wt)
calculate_average_times(waiting_time, turn_around_time, no_of_processes)
SCREAMING_SNAKE_CASE : Optional[Any] = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
"Process",
"BurstTime",
"ArrivalTime",
"WaitingTime",
"TurnAroundTime",
],
)
# Printing the dataFrame
pd.set_option("display.max_rows", fcfs.shape[0] + 1)
print(fcfs)
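

# Editor's addition - a minimal non-interactive usage sketch. The arrival/burst
# values below are illustrative assumptions, not part of the original program.
def _example_srtf_run():
    arrival = [0, 1, 2]
    bursts = [4, 2, 1]
    wt = calculate_waitingtime(arrival, bursts, 3)
    tat = calculate_turnaroundtime(bursts, 3, wt)
    calculate_average_times(wt, tat, 3)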
'''Pure-Python implementation of the MD5 message digest (educational; not for security use).'''
from collections.abc import Generator
from math import sin
def to_little_endian(string_aa: bytes) -> bytes:
    '''Convert a 32-char bit string from big-endian to little-endian byte order.'''
    if len(string_aa) != 32:
        raise ValueError('Input must be of length 32')

    little_endian = b''
    for i in [3, 2, 1, 0]:
        little_endian += string_aa[8 * i : 8 * i + 8]
    return little_endian
def reformat_hex(i: int) -> bytes:
    '''Format a non-negative int as 8 little-endian hex digits.'''
    if i < 0:
        raise ValueError('Input must be non-negative')

    hex_rep = format(i, '08x')[-8:]
    little_endian_hex = b''
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('utf-8')
    return little_endian_hex
def preprocess(message: bytes) -> bytes:
    '''Encode the message as a bit string, padded to a multiple of 512 chars.'''
    bit_string = b''
    for char in message:
        bit_string += format(char, '08b').encode('utf-8')
    start_len = format(len(bit_string), '064b').encode('utf-8')

    # Pad bit_string to a multiple of 512 chars
    bit_string += b'1'
    while len(bit_string) % 512 != 448:
        bit_string += b'0'
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string
def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    '''Yield each 512-char block as a list of 16 little-endian 32-bit words.'''
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words
def not_32(i: int) -> int:
    '''Bitwise NOT on a 32-bit unsigned value.'''
    if i < 0:
        raise ValueError('Input must be non-negative')

    i_str = format(i, '032b')
    new_str = ''
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)
def sum_32(a: int, b: int) -> int:
    '''Addition modulo 2**32.'''
    return (a + b) % 2**32
def left_rotate_32(i: int, shift: int) -> int:
    '''Left-rotate a 32-bit value by `shift` bits.'''
    if i < 0:
        raise ValueError('Input must be non-negative')
    if shift < 0:
        raise ValueError('Shift must be non-negative')
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me(message: bytes) -> bytes:
    '''Return the 32-char lowercase hex MD5 digest of `message` as bytes.'''
    # Convert to bit string, add padding and append message length
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    shift_amounts = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)     # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)     # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
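

# Editor's addition - a hedged self-check against the standard library (assumes
# the reconstructed names above; md5_me returns the lowercase hex digest as bytes):
def _example_md5_self_check():
    import hashlib

    message = b"hello"
    assert md5_me(message) == hashlib.md5(message).hexdigest().encode("utf-8")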
'''simple docstring'''
from math import pi, sqrt
def gamma(num: float) -> float:
    '''Calculate the gamma function for positive integer and half-integer arguments.'''
    if num <= 0:
        raise ValueError('math domain error')
    if num > 171.5:
        raise OverflowError('math range error')
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError('num must be an integer or a half-integer')
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)


def test_gamma() -> None:
    '''Basic sanity checks: gamma(0.5) = sqrt(pi), gamma(1) = gamma(2) = 1.'''
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    num = 1.0
    while num:
        num = float(input('Gamma of: '))
        if num:
            print(f'gamma({num}) = {gamma(num)}')
        print('\nEnter 0 to exit...')
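

# Editor's addition - illustrative checks: gamma(n) == (n - 1)! for positive
# integers, and half-integers reduce to sqrt(pi) times a rational factor.
def _example_gamma_values():
    assert gamma(5) == 24.0  # 4! == 24
    assert abs(gamma(3.5) - 1.875 * sqrt(pi)) < 1e-12  # 2.5 * 1.5 * 0.5 * sqrt(pi)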
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True, )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusionInpaintPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png")
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy")

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type="np", )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3
    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png")
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy")

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, torch_dtype=torch.float16, safety_checker=None, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type="np", )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png")
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png")

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        scheduler = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, safety_checker=None, scheduler=scheduler, torch_dtype=torch.float16, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, num_inference_steps=2, output_type="np", )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
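

# Editor's addition - a hedged end-to-end sketch mirroring the slow tests above.
# The model id and API calls are taken from this file; everything else,
# including the helper name and the hard-coded "cuda" device, is illustrative.
def _example_inpaint(init_image, mask_image):
    pipe = StableDiffusionInpaintPipeline.from_pretrained(
        "stabilityai/stable-diffusion-2-inpainting", safety_checker=None
    )
    pipe.to("cuda")
    generator = torch.manual_seed(0)
    output = pipe(
        prompt="Face of a yellow cat, high resolution, sitting on a park bench",
        image=init_image,
        mask_image=mask_image,
        generator=generator,
        output_type="np",
    )
    return output.images[0]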
"""simple docstring"""
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x):
    return x + 2
class PythonInterpreterTester(unittest.TestCase):
    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})

    def test_evaluate_call(self):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_expression(self):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})

    def test_evaluate_if(self):
        code = "if x <= 3:\n y = 2\nelse:\n y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})

        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})

    def test_evaluate_list(self):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

    def test_evaluate_name(self):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})

    def test_evaluate_subscript(self):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_for(self):
        code = "x = 0\nfor i in range(3):\n x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    '''Depth-first traversal that consumes each undirected edge exactly once.'''
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path


def check_circuit_or_path(graph, max_node):
    '''Classify the graph: 1 = Euler cycle, 2 = Euler path, 3 = neither.'''
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node


def check_euler(graph, max_node):
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)


def main():
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)
if __name__ == "__main__":
main()
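

# Editor's addition - a hedged illustration of the degree rule implemented by
# check_circuit_or_path: an undirected graph has an Euler cycle iff every vertex
# has even degree (return code 1), and an Euler path iff exactly two vertices
# have odd degree (return code 2). The sample graph is made up.
def _example_degree_rule():
    square = {1: [2, 4], 2: [1, 3], 3: [2, 4], 4: [1, 3]}  # a 4-cycle: all degrees even
    assert check_circuit_or_path(square, 10) == (1, -1)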
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SemanticStableDiffusionPipelineOutput(BaseOutput):
    '''Output of the semantic Stable Diffusion pipeline: the generated images plus
    per-image NSFW flags (None when the safety checker is disabled).'''

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
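
# Editor's note (hedged): downstream code reads the two fields of this output
# dataclass directly, e.g.
#
#     out = SemanticStableDiffusionPipelineOutput(images=[img], nsfw_content_detected=[False])
#     first_image = out.images[0]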
"""simple docstring"""
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece_bpe.model''')
class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")

        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
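

# Editor's addition - a hedged round-trip sketch using the fixtures above
# (SAMPLE_VOCAB and the monolingual vocab file come from this test's setUp;
# the helper name is illustrative):
def _example_bartpho_round_trip(tokenizer: BartphoTokenizer):
    tokens = tokenizer.tokenize("This is a là test")
    return tokenizer.convert_tokens_to_ids(tokens)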
"""simple docstring"""
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=2, seq_length=56, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=2, intermediate_size=7, hidden_act="gelu_new", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4, attention_type="block_sparse", use_bias=True, rescale_embeddings=False, block_size=2, num_random_blocks=3, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BigBirdConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, attention_type=self.attention_type, block_size=self.block_size, num_random_blocks=self.num_random_blocks, use_bias=self.use_bias, rescale_embeddings=self.rescale_embeddings, )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_flax
class FlaxBigBirdModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    test_attn_probs = False
    test_mismatched_shapes = False

    def setUp(self):
        self.model_tester = FlaxBigBirdModelTester(self)
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained(self):
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init(self):
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init(self):
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output(self):
super().test_hidden_states_output()
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/bigbird-roberta-base")
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
if self.test_attn_probs:
super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(input_ids, attention_mask=None, **kwargs):
                    return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None):
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
        # an effort was done to return `attention_probs` (yet to be verified).
        if name.startswith("outputs.attentions"):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes)
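

# Editor's addition - a hedged sketch of the jit pattern tested above: wrap the
# model call once with jax.jit and reuse it; the first call compiles, later
# calls run the cached program. The helper name is illustrative.
def _example_jit_forward(model, input_ids, attention_mask):
    @jax.jit
    def forward(input_ids, attention_mask):
        return model(input_ids=input_ids, attention_mask=attention_mask)

    return forward(input_ids, attention_mask)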
'''simple docstring'''
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class RagRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8
# DPR tok
        vocab_tokens = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
# BART tok
        vocab_tokens = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
        vocab_tokens = dict(zip(vocab_tokens, range(len(vocab_tokens))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
        return dataset
    def get_dummy_canonical_hf_index_retriever(self):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size, question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict(), )
        with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer(), )
        return retriever
    def get_dummy_custom_hf_index_retriever(self, from_disk: bool):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size, question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict(), index_name="custom", )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname, "dataset")
            config.index_path = os.path.join(self.tmpdirname, "index.faiss")
            dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss"))
            dataset.drop_index("embeddings")
            dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset"))
            del dataset
            retriever = RagRetriever(
                config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer(), )
        else:
            retriever = RagRetriever(
                config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer(), index=CustomHFIndex(config.retrieval_vector_size, dataset), )
        return retriever
    def get_dummy_legacy_index_retriever(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)

        index_file_name = os.path.join(self.tmpdirname, "hf_bert_base.hnswSQ8_correct_phi_128.c_index")
        dataset.save_faiss_index("embeddings", index_file_name + ".index.dpr")
        pickle.dump(dataset["id"], open(index_file_name + ".index_meta.dpr", "wb"))

        passages_file_name = os.path.join(self.tmpdirname, "psgs_w100.tsv.pkl")
        passages = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
        pickle.dump(passages, open(passages_file_name, "wb"))

        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size, question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict(), index_name="legacy", index_path=self.tmpdirname, )
        retriever = RagRetriever(
            config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer()
        )
        return retriever
    def test_canonical_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])
    def test_canonical_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
                mock_load_dataset.return_value = self.get_dummy_dataset()
                retriever.save_pretrained(tmp_dirname)
                retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)
    def test_custom_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])
    def test_custom_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)
    def test_custom_hf_index_retriever_retrieve_from_disk(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])
    def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)
    def test_legacy_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_legacy_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["text", "title"])
        self.assertEqual(len(doc_dicts[0]["text"]), n_docs)
        self.assertEqual(doc_dicts[0]["text"][0], "bar")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["text"][0], "foo")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])
    def test_legacy_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)
@require_torch
@require_tokenizers
@require_sentencepiece
    def test_hf_index_retriever_call(self):
        import torch

        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
        context_input_ids, context_attention_mask, retrieved_doc_embeds = (
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, list)
        self.assertIsInstance(context_attention_mask, list)
        self.assertIsInstance(retrieved_doc_embeds, np.ndarray)

        out = retriever(
            question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs, return_tensors="pt", )
        context_input_ids, context_attention_mask, retrieved_doc_embeds, doc_ids = (  # noqa: F841
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
            out["doc_ids"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, torch.Tensor)
        self.assertIsInstance(context_attention_mask, torch.Tensor)
        self.assertIsInstance(retrieved_doc_embeds, torch.Tensor)
@require_torch
@require_tokenizers
@require_sentencepiece
    def test_custom_hf_index_end_to_end_retrieval(self):
        context_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer()
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        retriever.set_ctx_encoder_tokenizer(context_encoder_tokenizer)

        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)

        self.assertEqual(
            len(out), 6
        )  # check whether the retriever output consist of 6 attributes including tokenized docs
        self.assertEqual(
            all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask")), True
        )  # check for doc token related keys in dictionary.
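

# Editor's addition - a hedged sketch of the retrieval call exercised above:
# given question-encoder hidden states (one row per query), retrieve() returns
# the document embeddings, ids and payload dicts for the top n_docs documents.
def _example_retrieve(retriever: RagRetriever, hidden_states: np.ndarray):
    retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=1)
    return doc_ids, doc_dicts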
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
A_ = " def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n"
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
            os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
        )

    def tearDown(self):
        check_copies.TRANSFORMER_PATH = "src/transformers"
        shutil.rmtree(self.transformer_dir)
    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        black_mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=black_mode)
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(code, REFERENCE_CODE)
    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            re.sub("Bert", "TestModel", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}",
            f"{long_class_name}LMPredictionHead",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            REFERENCE_CODE,
            overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]

        md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'
' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'
' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'
' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'
' Luong, Quoc V. Le, Christopher D. Manning.'
)
        localized_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        converted_md_list_sample = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'
' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'
' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'
' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'
' Christopher D. Manning 发布。\n'
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )
        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_md_list_sample)

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_md_list, localized_readme["format_model_list"]
        )
        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)
        link_changed_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'
)
        link_unchanged_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'
' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        converted_md_list_sample = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list, link_unchanged_md_list, localized_readme["format_model_list"]
        )
        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
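
# Editor's note (hedged): in the transformers repository these checks run via
# `python utils/check_copies.py`, with `--fix_and_overwrite` to auto-repair
# stale copies; `make fix-copies` wraps that invocation.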
'''simple docstring'''
import argparse
from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
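
# Example invocation (editor's addition; the script name and paths are
# placeholders, the flags are defined above):
#
#   python convert_t5_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./t5/model.ckpt \
#       --config_file ./t5/config.json \
#       --pytorch_dump_path ./t5-pytorch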
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = 'docs/source/en/_toctree.yml'
def clean_doc_toc(doc_list):
    '''Clean one section of the TOC: de-duplicate entries, sort alphabetically, keep `Overview` first.'''
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1

        if doc["title"].lower() == "overview":
            overview_doc.append({'local': doc['local'], 'title': doc['title']})
        else:
            new_doc_list.append(doc)

    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc['title'] for doc in doc_list if doc['local'] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                '`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
                'others.')
        # Only add this once
        new_doc.append({'local': duplicate_key, 'title': titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if 'local' not in counts or counts[doc['local']] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError(f"{doc_list} has two 'overview' docs which is not allowed.")

    overview_doc.extend(new_doc)

    # Sort
    return overview_doc
def check_scheduler_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
def check_pipeline_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
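# Example invocation (added as a sketch; the script filename is a hypothetical
# placeholder): check both ToC sections and rewrite them in place when out of order.
#
#   python check_doc_toc.py --fix_and_overwrite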
| 331
| 1
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
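# A minimal usage sketch (added; not part of the original file). PipelineTool.__call__
# chains encode -> forward -> decode, so passing a raw waveform (assumed here to be a
# 1-D float array sampled at 16 kHz) returns the transcription directly:
#
#   tool = SpeechToTextTool()
#   transcription = tool(audio_array)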
| 490
|
"""simple docstring"""
def binary_multiply(a: int, b: int) -> int:
    """Multiply a and b with the Russian-peasant (binary) method."""
    res = 0
    while b > 0:
        if b & 1:
            res += a

        a += a
        b >>= 1

    return res


def binary_mod_multiply(a: int, b: int, c: int) -> int:
    """Multiply a and b modulo c, keeping the running result reduced."""
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c

        a += a
        b >>= 1

    return res
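# Quick sanity checks (added; not in the original file):
if __name__ == "__main__":
    assert binary_multiply(12, 34) == 12 * 34
    assert binary_mod_multiply(12, 34, 7) == (12 * 34) % 7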
| 490
| 1
|
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
WEIGHTS_TO_AVERAGE_ENDSWITH = [
'''word_embeddings_layernorm.weight''',
'''word_embeddings_layernorm.bias''',
'''input_layernorm.weight''',
'''input_layernorm.bias''',
'''post_attention_layernorm.weight''',
'''post_attention_layernorm.bias''',
'''self_attention.dense.bias''',
'''mlp.dense_4h_to_h.bias''',
'''ln_f.weight''',
'''ln_f.bias''',
]
WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
'''mlp.dense_4h_to_h.weight''',
'''self_attention.dense.weight''',
]
def layer_name_mapping(key, file):
    """Convert the Megatron-DeepSpeed TP/PP weight names to transformers names."""
    # Handle first and last layers
    layer_rename_map = {
        "word_embeddings.weight": "word_embeddings.weight",
        "word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
        "word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
        "weight": "ln_f.weight",
        "bias": "ln_f.bias",
    }

    if key in layer_rename_map:
        return layer_rename_map[key]

    # Handle transformer blocks
    layer_number = int(re.match(r".*layer_(\d*).*", file)[1])
    layer_number -= 3
    return f"h.{layer_number}." + key
def get_dtype_size(dtype):
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r"[^\d](\d+)$", str(dtype))
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8
def convert_bloom_checkpoint_to_pytorch(
    bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp
):
    # Construct model
    if bloom_config_file == "":
        config = BloomConfig()
    else:
        config = BloomConfig.from_json_file(bloom_config_file)

    if shard_model:
        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        index_dict = {"weight_map": {}, "metadata": {}}
        total_size = 0

        missing_keys = None
        config = BloomConfig()

        for j, file in enumerate(file_names):
            print("Processing file: {}".format(file))
            tensors = None

            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp
            torch.save(
                tensors,
                os.path.join(
                    pytorch_dump_folder_path,
                    "pytorch_model_{}-of-{}.bin".format(str(j + 1).zfill(5), str(len(file_names)).zfill(5)),
                ),
            )

            for key in tensors.keys():
                value = tensors[key]
                total_size += value.numel() * get_dtype_size(value.dtype)
                if key not in index_dict["weight_map"]:
                    index_dict["weight_map"][key] = "pytorch_model_{}-of-{}.bin".format(
                        str(j + 1).zfill(5), str(len(file_names)).zfill(5)
                    )

        config = BloomConfig()
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        index_dict["metadata"]["total_size"] = total_size
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
        with open(os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME + ".index.json"), "w", encoding="utf-8") as f:
            json_config = json.dumps(index_dict, indent=2, sort_keys=True) + "\n"
            f.write(json_config)
    else:
        model = BloomModel(config)

        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        missing_keys = None
        for i, file in enumerate(file_names):
            tensors = None
            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp

            other_keys = model.load_state_dict(tensors, strict=False)
            assert not other_keys.unexpected_keys, f"The keys {other_keys.unexpected_keys} are unexpected"
            if missing_keys is None:
                missing_keys = set(other_keys.missing_keys)
            else:
                missing_keys = missing_keys.intersection(set(other_keys.missing_keys))

        assert not missing_keys, f"The keys {missing_keys} are missing"

        # Save pytorch-model
        os.makedirs(pytorch_dump_folder_path, exist_ok=True)
        pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        print(f"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}")
        if config.torch_dtype is not None:
            model = model.to(config.torch_dtype)
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {pytorch_config_dump_path}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bloom_checkpoint_path''',
default=None,
type=str,
required=True,
help='''Path to the Megatron-LM checkpoint path.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--bloom_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--shard_model''',
action='''store_true''',
help='''An optional setting to shard the output model \nThis enables sharding the converted checkpoint''',
)
parser.add_argument(
'''--pretraining_tp''',
default=4,
type=int,
help='''Pretraining TP rank that has been used when training the model in Megatron-LM \n''',
)
    args = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
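# Example invocation (added as a sketch; the script filename and checkpoint directory are
# hypothetical placeholders):
#
#   python convert_bloom_original_checkpoint_to_pytorch.py \
#       --bloom_checkpoint_path ./bloom_megatron_ckpt \
#       --pytorch_dump_folder_path ./bloom-pytorch \
#       --shard_model \
#       --pretraining_tp 4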
| 714
|
from __future__ import annotations
COULOMBS_CONSTANT = 8.988e9  # units = N * m^2 * C^-2


def coulombs_law(force: float, charge1: float, charge2: float, distance: float) -> dict[str, float]:
    """
    Apply Coulomb's law: given any three of force, the two charges and the distance
    (passing the unknown quantity as 0), solve for the remaining quantity.
    """
    charge_product = abs(charge1 * charge2)

    if (force, charge1, charge2, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if distance < 0:
        raise ValueError("Distance cannot be negative")
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
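# Worked example (added; not in the original file): two 1 C charges 1 m apart feel
# COULOMBS_CONSTANT newtons of force, and feeding that force back in recovers the distance.
#
#   coulombs_law(force=0, charge1=1, charge2=1, distance=1)         # {'force': 8.988e9}
#   coulombs_law(force=8.988e9, charge1=1, charge2=1, distance=0)   # {'distance': 1.0}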
| 247
| 0
|
"""simple docstring"""
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def create_components():
    model = torch.nn.Linear(2, 4)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1)
    train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3])))
    valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6])))

    return model, optimizer, scheduler, train_dl, valid_dl


def get_signature(model):
    return (model.weight.abs().sum() + model.bias.abs().sum()).item()


def load_random_weights(model):
    state = torch.nn.Linear(*tuple(model.weight.T.shape)).state_dict()
    model.load_state_dict(state)
class AcceleratorTester(AccelerateTestCase):
    @require_cuda
    def test_accelerator_can_be_reinstantiated(self):
        _ = Accelerator()
        assert PartialState._shared_state["_cpu"] is False
        assert PartialState._shared_state["device"].type == "cuda"
        with self.assertRaises(ValueError):
            _ = Accelerator(cpu=True)

    def test_mutable_states(self):
        accelerator = Accelerator()
        state = GradientState()
        assert state.num_steps == 1
        accelerator.gradient_accumulation_steps = 4
        assert state.num_steps == 4

        assert state.sync_gradients is True
        accelerator.sync_gradients = False
        assert state.sync_gradients is False
        GradientState._reset_state()

    def test_prepared_objects_are_referenced(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()

        (
            prepared_model,
            prepared_optimizer,
            prepared_scheduler,
            prepared_train_dl,
            prepared_valid_dl,
        ) = accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        self.assertTrue(prepared_model in accelerator._models)
        self.assertTrue(prepared_optimizer in accelerator._optimizers)
        self.assertTrue(prepared_scheduler in accelerator._schedulers)
        self.assertTrue(prepared_train_dl in accelerator._dataloaders)
        self.assertTrue(prepared_valid_dl in accelerator._dataloaders)

    def test_free_memory_dereferences_prepared_components(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
        accelerator.free_memory()
        self.assertTrue(len(accelerator._models) == 0)
        self.assertTrue(len(accelerator._optimizers) == 0)
        self.assertTrue(len(accelerator._schedulers) == 0)
        self.assertTrue(len(accelerator._dataloaders) == 0)

    def test_env_var_device(self):
        PartialState._reset_state()

        # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
        def noop(*args, **kwargs):
            pass

        with patch("torch.cuda.set_device", noop), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64"):
            accelerator = Accelerator()
            self.assertEqual(str(accelerator.state.device), "cuda:64")

    def test_save_load_model(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        model_signature = get_signature(model)

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # make sure loaded weights match
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

    def test_save_load_model_with_hooks(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        model_signature = get_signature(model)

        # saving hook
        def save_config(models, weights, output_dir):
            config = {"class_name": models[0].__class__.__name__}

            with open(os.path.join(output_dir, "data.json"), "w") as f:
                json.dump(config, f)

        # loading hook
        def load_config(models, input_dir):
            with open(os.path.join(input_dir, "data.json"), "r") as f:
                config = json.load(f)

            models[0].class_name = config["class_name"]

        save_hook = accelerator.register_save_state_pre_hook(save_config)
        load_hook = accelerator.register_load_state_pre_hook(load_config)

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match with hooks
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # random class name to verify correct one is loaded
            model.class_name = "random"

            # make sure loaded weights match with hooks
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

            # mode.class_name is loaded from config
            self.assertTrue(model.class_name == model.__class__.__name__)

        # remove hooks
        save_hook.remove()
        load_hook.remove()

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match with hooks removed
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # random class name to verify correct one is loaded
            model.class_name = "random"

            # make sure loaded weights match with hooks removed
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

            # mode.class_name is NOT loaded from config
            self.assertTrue(model.class_name != model.__class__.__name__)

    def test_accelerator_none(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = None

        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model, optimizer, scheduler, train_dl, valid_dl, dummy_obj
        )
        self.assertTrue(dummy_obj is None)

    def test_is_accelerator_prepared(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = [1, 2, 3]

        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model, optimizer, scheduler, train_dl, valid_dl, dummy_obj
        )
        self.assertEqual(
            getattr(dummy_obj, "_is_accelerate_prepared", False),
            False,
            "Dummy object should have `_is_accelerate_prepared` set to `True`",
        )
        self.assertEqual(
            getattr(model, "_is_accelerate_prepared", False),
            True,
            "Model is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(optimizer, "_is_accelerate_prepared", False),
            True,
            "Optimizer is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(scheduler, "_is_accelerate_prepared", False),
            True,
            "Scheduler is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(train_dl, "_is_accelerate_prepared", False),
            True,
            "Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(valid_dl, "_is_accelerate_prepared", False),
            True,
            "Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`",
        )

    @slow
    @require_bnb
    def test_accelerator_bnb(self):
        from transformers import AutoModelForCausalLM

        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map={"": 0},
        )
        accelerator = Accelerator()

        # This should work
        model = accelerator.prepare(model)

    @slow
    @require_bnb
    def test_accelerator_bnb_cpu_error(self):
        from transformers import AutoModelForCausalLM

        accelerator = Accelerator()
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = "cpu"

        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m", device_map=device_map, load_in_8bit=True, llm_int8_enable_fp32_cpu_offload=True
        )

        # This should not work and get value error
        with self.assertRaises(ValueError):
            model = accelerator.prepare(model)

    @slow
    @require_bnb
    @require_multi_gpu
    def test_accelerator_bnb_multi_gpu(self):
        from transformers import AutoModelForCausalLM

        PartialState._shared_state = {"distributed_type": DistributedType.MULTI_GPU}

        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = 1

        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map=device_map,
        )
        accelerator = Accelerator()

        # This should not work and get value error
        with self.assertRaises(ValueError):
            _ = accelerator.prepare(model)

        PartialState._reset_state()

    @slow
    @require_bnb
    @require_multi_gpu
    def test_accelerator_bnb_multi_gpu_no_distributed(self):
        from transformers import AutoModelForCausalLM

        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = 1

        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map=device_map,
        )
        accelerator = Accelerator()

        # This should work
        _ = accelerator.prepare(model)

    @require_cuda
    def test_accelerator_cpu_flag_prepare(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
        accelerator = Accelerator(cpu=True)
        _ = accelerator.prepare(model)
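# A minimal sketch (added; not part of the test file) of the save/load round trip the
# tests above exercise; "ckpt" is a hypothetical output directory:
#
#   accelerator = Accelerator()
#   model = accelerator.prepare(torch.nn.Linear(2, 4))
#   accelerator.save_state("ckpt")
#   accelerator.load_state("ckpt")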
| 237
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
logger = logging.get_logger(__name__)


class DonutFeatureExtractor(DonutImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 685
| 0
|
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    """
    Few-shot named entity recognition model built on token-level BERT embeddings.
    """

    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(FSNERModel, self).__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        """Score each query token as a potential entity start/end token."""
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        # Embed the query and support examples
        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
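# A minimal shape-level sketch (added; not part of the original file). W_query and
# W_supports are assumed to be tokenizer outputs, with W_supports also carrying the
# bookkeeping tensors consumed above ("sizes", "start_token_id", "end_token_id"):
#
#   model = FSNERModel()
#   p_starts, p_ends = model(W_query, W_supports)  # per-token start/end probabilities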
| 705
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
class MobileNetV1ImageProcessor(BaseImageProcessor):
    """
    Constructs an image processor that resizes, center-crops, rescales and normalizes images.
    """

    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
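# A minimal usage sketch (added; not part of the original file), assuming Pillow is
# available and "cat.png" exists locally:
#
#   from PIL import Image
#   processor = MobileNetV1ImageProcessor()
#   batch = processor(images=Image.open("cat.png"), return_tensors="pt")
#   print(batch["pixel_values"].shape)  # torch.Size([1, 3, 224, 224]) after resize + crop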
| 76
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_clap': [
'CLAP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ClapAudioConfig',
'ClapConfig',
'ClapTextConfig',
],
'processing_clap': ['ClapProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clap"] = [
'CLAP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ClapModel',
'ClapPreTrainedModel',
'ClapTextModel',
'ClapTextModelWithProjection',
'ClapAudioModel',
'ClapAudioModelWithProjection',
]
    _import_structure["feature_extraction_clap"] = ["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
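# Note (added; a sketch of how the _LazyModule pattern behaves, not part of the original
# file): submodules are only imported on first attribute access, keeping the top-level
# `import transformers` fast even though torch-only symbols are registered above.
#
#   import transformers.models.clap as clap
#   processor_cls = clap.ClapProcessor  # triggers the real import of processing_clap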
| 643
|
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
PREFIX = 'https://openaipublic.azureedge.net/jukebox/models/'
MODEL_MAPPING = {
'jukebox-1b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'1b_lyrics/prior_level_2.pth.tar',
],
'jukebox-5b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'5b_lyrics/prior_level_2.pth.tar',
],
}
def replace_key(key):
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")

    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")

    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")
    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")

    return key
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")
        # handle missmatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and {value.shape}, do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]

        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)

    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='jukebox-5b-lyrics',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='jukebox-5b-lyrics-converted',
type=str,
help='Path to the output PyTorch model directory.',
)
    args = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
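# Example invocation (added as a sketch; the script filename is a hypothetical
# placeholder; the defaults above download the public checkpoints first):
#
#   python convert_jukebox.py --model_name jukebox-1b-lyrics --pytorch_dump_folder_path ./jukebox-1b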
| 643
| 1
|
"""simple docstring"""
from collections.abc import Generator
def fibonacci_generator() -> Generator[int, None, None]:
    """Yield the Fibonacci numbers 1, 2, 3, 5, 8, ..."""
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1_000) -> int:
    """Return the index of the first term in the Fibonacci sequence to contain n digits."""
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
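# Quick check (added; not in the original file): 144, the 12th Fibonacci term, is the
# first with three digits, so for Project Euler problem 25:
#
#   assert solution(3) == 12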
| 703
|
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10_269, 83, 99_942, 136, 60_742, 23, 70, 80_583, 18_276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10_269, 83, 99_942, 136, 60_742, 23, 70, 80_583, 18_276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 1_024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 556
| 0
|
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained('''prajjwal1/bert-tiny''' , '''prajjwal1/bert-tiny''' )
        tokenizer = BertTokenizer.from_pretrained('''bert-base-uncased''' )

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''train[:1%]''' )
        val_dataset = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''validation[:1%]''' )

        train_dataset = train_dataset.select(range(32 ) )
        val_dataset = val_dataset.select(range(16 ) )

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch['''article'''] , padding='''max_length''' , truncation=True , max_length=512 )
            outputs = tokenizer(batch['''highlights'''] , padding='''max_length''' , truncation=True , max_length=128 )
            batch['''input_ids'''] = inputs.input_ids
            batch['''attention_mask'''] = inputs.attention_mask

            batch['''decoder_input_ids'''] = outputs.input_ids
            batch['''labels'''] = outputs.input_ids.copy()
            batch['''labels'''] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['''labels''']
            ]
            batch['''decoder_attention_mask'''] = outputs.attention_mask

            assert all(len(x ) == 512 for x in inputs.input_ids )
            assert all(len(x ) == 128 for x in outputs.input_ids )

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids , skip_special_tokens=True )
            label_str = tokenizer.batch_decode(labels_ids , skip_special_tokens=True )

            accuracy = sum([int(pred_str[i] == label_str[i] ) for i in range(len(pred_str ) )] ) / len(pred_str )

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs , batched=True , batch_size=batch_size , remove_columns=['''article''', '''highlights'''] , )
        train_dataset.set_format(
            type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs , batched=True , batch_size=batch_size , remove_columns=['''article''', '''highlights'''] , )
        val_dataset.set_format(
            type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir , per_device_train_batch_size=batch_size , per_device_eval_batch_size=batch_size , predict_with_generate=True , evaluation_strategy='''steps''' , do_train=True , do_eval=True , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert , args=training_args , compute_metrics=_compute_metrics , train_dataset=train_dataset , eval_dataset=val_dataset , tokenizer=tokenizer , )

        # start training
        trainer.train()
| 583
|
from PIL import Image
def change_contrast(img: Image, level: int) -> Image:
    """Change the contrast of a PIL image by mapping each channel value through a linear factor."""
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        """Fundamental Transformation/Operation that'll be performed on every bit."""
        return int(128 + factor * (c - 128))

    return img.point(contrast)
if __name__ == "__main__":
# Load image
with Image.open("""image_data/lena.jpg""") as img:
# Change contrast to 170
        cont_img = change_contrast(img, 170)
cont_img.save("""image_data/lena_high_contrast.png""", format="""png""")
| 583
| 1
|
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    """
    Utility class for CLIP embeddings: combines the image and text embeddings into a format usable by the decoder.
    """

    @register_to_config
    def __init__(self, *, clip_extra_context_tokens: int = 4, clip_embeddings_dim: int = 768, time_embed_dim: int, cross_attention_dim):
        super().__init__()

        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))

        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)

        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim
        )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1
            )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)

        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]

        batch_size = prompt_embeds.shape[0]

        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds

        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)

        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)

        return text_encoder_hidden_states, additive_clip_time_embeddings
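# A minimal shape-level sketch (added; not part of the original file), with hypothetical
# dimensions matching the UnCLIP defaults:
#
#   proj = UnCLIPTextProjModel(
#       clip_extra_context_tokens=4, clip_embeddings_dim=768, time_embed_dim=1536, cross_attention_dim=768
#   )
#   hidden, time_emb = proj(
#       image_embeddings=torch.randn(2, 768),
#       prompt_embeds=torch.randn(2, 768),
#       text_encoder_hidden_states=torch.randn(2, 77, 768),
#       do_classifier_free_guidance=False,
#   )
#   # hidden: (2, 4 + 77, 768); time_emb: (2, 1536)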
| 589
|
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn.grep_linear""": """encoder.layers.*.attention.gru_rel_pos_linear""",
"""self_attn.relative_attention_bias""": """encoder.layers.*.attention.rel_attn_embed""",
"""self_attn.grep_a""": """encoder.layers.*.attention.gru_rel_pos_const""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
__lowerCAmelCase = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def _lowercase ( a__ : Any , a__ : int , a__ : List[str] , a__ : Optional[int] , a__ : Tuple ) -> int:
"""simple docstring"""
for attribute in key.split("." ):
_UpperCamelCase = getattr(a__ , a__ )
if weight_type is not None:
_UpperCamelCase = getattr(a__ , a__ ).shape
else:
_UpperCamelCase = hf_pointer.shape
assert hf_shape == value.shape, (
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
_UpperCamelCase = value
elif weight_type == "weight_g":
_UpperCamelCase = value
elif weight_type == "weight_v":
_UpperCamelCase = value
elif weight_type == "bias":
_UpperCamelCase = value
else:
_UpperCamelCase = value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def _lowercase ( a__ : List[str] , a__ : Any ) -> List[str]:
"""simple docstring"""
_UpperCamelCase = []
_UpperCamelCase = fairseq_model.state_dict()
_UpperCamelCase = hf_model.feature_extractor
for name, value in fairseq_dict.items():
_UpperCamelCase = False
if "conv_layers" in name:
load_conv_layer(
a__ , a__ , a__ , a__ , hf_model.config.feat_extract_norm == "group" , )
_UpperCamelCase = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
_UpperCamelCase = True
if "*" in mapped_key:
_UpperCamelCase = name.split(a__ )[0].split("." )[-2]
_UpperCamelCase = mapped_key.replace("*" , a__ )
if "weight_g" in name:
_UpperCamelCase = "weight_g"
elif "weight_v" in name:
_UpperCamelCase = "weight_v"
elif "bias" in name and "relative_attention_bias" not in name:
_UpperCamelCase = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
_UpperCamelCase = "weight"
else:
_UpperCamelCase = None
set_recursively(a__ , a__ , a__ , a__ , a__ )
continue
if not is_used:
unused_weights.append(a__ )
logger.warning(f'''Unused weights: {unused_weights}''' )
def _lowercase ( a__ : Any , a__ : int , a__ : List[Any] , a__ : str , a__ : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = full_name.split("conv_layers." )[-1]
_UpperCamelCase = name.split("." )
_UpperCamelCase = int(items[0] )
_UpperCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
_UpperCamelCase = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
_UpperCamelCase = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'''{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
_UpperCamelCase = value
logger.info(f'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'''
)
_UpperCamelCase = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(a__ )
@torch.no_grad()
def _lowercase ( a__ : List[Any] , a__ : Tuple , a__ : List[str]=None ) -> List[str]:
"""simple docstring"""
_UpperCamelCase = torch.load(a__ )
_UpperCamelCase = WavLMConfigOrig(checkpoint["cfg"] )
_UpperCamelCase = WavLMOrig(a__ )
model.load_state_dict(checkpoint["model"] )
model.eval()
if config_path is not None:
_UpperCamelCase = WavLMConfig.from_pretrained(a__ )
else:
_UpperCamelCase = WavLMConfig()
_UpperCamelCase = WavLMModel(a__ )
recursively_load_weights(a__ , a__ )
hf_wavlm.save_pretrained(a__ )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
__lowerCAmelCase = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
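# Intended invocation (script filename and paths below are illustrative, not confirmed by this file):
# python convert_wavlm_checkpoint.py --checkpoint_path ./WavLM-Base.pt --pytorch_dump_folder_path ./wavlm-base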
| 589
| 1
|
'''simple docstring'''
def _lowerCAmelCase ( __magic_name__ : str ) -> str:
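# intended behavior (mirroring the upstream doctest): return the longest palindromic
# substring in linear time via Manacher's algorithm, e.g. "abbbaba" -> "abbba"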
lowercase : Dict =0
# if input_string is "aba" than new_input_string become "a|b|a"
lowercase : Any =''''''
lowercase : Optional[int] =''''''
# append each character + "|" in new_string for range(0, length-1)
for i in input_string[: len(__magic_name__ ) - 1]:
new_input_string += i + "|"
# append last character
new_input_string += input_string[-1]
# we will store the start and end of the previous furthest-ending palindromic
# substring
lowercase , lowercase : Tuple =0, 0
# length[i] shows the length of palindromic substring with center i
lowercase : Any =[1 for i in range(len(__magic_name__ ) )]
# for each character in new_input_string, find the corresponding palindromic substring
lowercase : Dict =0
for j in range(len(__magic_name__ ) ):
lowercase : Optional[int] =1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
while (
j - k >= 0
and j + k < len(__magic_name__ )
and new_input_string[k + j] == new_input_string[j - k]
):
k += 1
lowercase : Optional[Any] =2 * k - 1
# does this palindrome end after the previously explored right boundary (that is, r)?
# if yes, update r to the last index of this palindrome
if j + k - 1 > r:
lowercase : Optional[Any] =j - k + 1 # noqa: E741
lowercase : Tuple =j + k - 1
# update max_length and start position
if max_length < length[j]:
lowercase : int =length[j]
lowercase : Optional[Any] =j
# create that string
lowercase : Dict =new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
for i in s:
if i != "|":
output_string += i
return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
| 92
|
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def __snake_case ( __UpperCamelCase : Tuple ,__UpperCamelCase : Dict ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : List[Any] ,__UpperCamelCase : List[Any] ):
"""simple docstring"""
with open(__UpperCamelCase ) as metadata_file:
A_ = json.load(__UpperCamelCase )
A_ = LukeConfig(use_entity_aware_attention=__UpperCamelCase ,**metadata["model_config"] )
# Load in the weights from the checkpoint_path
A_ = torch.load(__UpperCamelCase ,map_location="cpu" )
# Load the entity vocab file
A_ = load_entity_vocab(__UpperCamelCase )
A_ = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
A_ = AddedToken("<ent>" ,lstrip=__UpperCamelCase ,rstrip=__UpperCamelCase )
A_ = AddedToken("<ent2>" ,lstrip=__UpperCamelCase ,rstrip=__UpperCamelCase )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f'''Saving tokenizer to {pytorch_dump_folder_path}''' )
tokenizer.save_pretrained(__UpperCamelCase )
with open(os.path.join(__UpperCamelCase ,LukeTokenizer.vocab_files_names["entity_vocab_file"] ) ,"w" ) as f:
json.dump(__UpperCamelCase ,__UpperCamelCase )
A_ = LukeTokenizer.from_pretrained(__UpperCamelCase )
# Initialize the embeddings of the special tokens
A_ = state_dict["embeddings.word_embeddings.weight"]
A_ = word_emb[tokenizer.convert_tokens_to_ids(["@"] )[0]].unsqueeze(0 )
A_ = word_emb[tokenizer.convert_tokens_to_ids(["#"] )[0]].unsqueeze(0 )
A_ = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
A_ = f'''encoder.layer.{layer_index}.attention.self.'''
A_ = state_dict[prefix + matrix_name]
A_ = state_dict[prefix + matrix_name]
A_ = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
A_ = state_dict["entity_embeddings.entity_embeddings.weight"]
A_ = entity_emb[entity_vocab["[MASK]"]]
A_ = LukeModel(config=__UpperCamelCase ).eval()
A_ , A_ = model.load_state_dict(__UpperCamelCase ,strict=__UpperCamelCase )
if not (len(__UpperCamelCase ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(f'''Missing keys {", ".join(__UpperCamelCase )}. Expected only missing embeddings.position_ids''' )
if not (all(key.startswith("entity_predictions" ) or key.startswith("lm_head" ) for key in unexpected_keys )):
raise ValueError(
"Unexpected keys"
f''' {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}''' )
# Check outputs
A_ = LukeTokenizer.from_pretrained(__UpperCamelCase ,task="entity_classification" )
A_ = (
"Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
" new world number one avoid a humiliating second- round exit at Wimbledon ."
)
A_ = (39, 42)
A_ = tokenizer(__UpperCamelCase ,entity_spans=[span] ,add_prefix_space=__UpperCamelCase ,return_tensors="pt" )
A_ = model(**__UpperCamelCase )
# Verify word hidden states
if model_size == "large":
A_ = torch.Size((1, 42, 1024) )
A_ = torch.tensor(
[[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]] )
else: # base
A_ = torch.Size((1, 42, 768) )
A_ = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] ,__UpperCamelCase ,atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
A_ = torch.Size((1, 1, 1024) )
A_ = torch.tensor([[0.0466, -0.0106, -0.0179]] )
else: # base
A_ = torch.Size((1, 1, 768) )
A_ = torch.tensor([[0.1457, 0.1044, 0.0174]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
f''' {expected_shape}''' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] ,__UpperCamelCase ,atol=1E-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(__UpperCamelCase ) )
model.save_pretrained(__UpperCamelCase )
def __snake_case ( __UpperCamelCase : str ):
"""simple docstring"""
A_ = {}
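# each line of the entity vocab file is "<entity>\t<count>" (the second field is presumably
# a frequency count); every entity is mapped to its zero-based line index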
with open(__UpperCamelCase ,"r" ,encoding="utf-8" ) as f:
for index, line in enumerate(__UpperCamelCase ):
A_ , A_ = line.rstrip().split("\t" )
A_ = index
return entity_vocab
if __name__ == "__main__":
__a :Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
__a :Tuple = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 86
| 0
|
"""simple docstring"""
def __lowercase ( a : int ) -> int:
if n == 1 or not isinstance(n , int ):
return 0
elif n == 2:
return 1
else:
__snake_case : Optional[Any] =[0, 1]
for i in range(2 , n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def __lowercase ( a : int ) -> int:
__snake_case : Any =0
__snake_case : List[str] =2
while digits < n:
index += 1
__snake_case : int =len(str(fibonacci(a ) ) )
return index
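# with the default n = 1_000 this answers Project Euler problem 25:
# the index of the first Fibonacci number to contain 1000 digits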
def __lowercase ( a : int = 1_000 ) -> int:
return fibonacci_digits_index(a )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 497
|
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
UpperCamelCase_ : int = subprocess.check_output("""git merge-base main HEAD""".split()).decode("""utf-8""")
UpperCamelCase_ : Optional[int] = (
subprocess.check_output(F'''git diff --diff-filter=d --name-only {fork_point_sha}'''.split()).decode("""utf-8""").split()
)
UpperCamelCase_ : List[Any] = """|""".join(sys.argv[1:])
UpperCamelCase_ : List[str] = re.compile(rF'''^({joined_dirs}).*?\.py$''')
UpperCamelCase_ : str = [x for x in modified_files if regex.match(x)]
print(""" """.join(relevant_modified_files), end="""""")
| 497
| 1
|
from __future__ import annotations
def UpperCAmelCase_ ( __UpperCAmelCase : list[int] , __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : int ) -> None:
if (direction == 1 and array[indexa] > array[indexa]) or (
direction == 0 and array[indexa] < array[indexa]
):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = array[indexa], array[indexa]
def UpperCAmelCase_ ( __UpperCAmelCase : list[int] , __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : int ) -> None:
if length > 1:
SCREAMING_SNAKE_CASE_ = int(length / 2 )
for i in range(__UpperCAmelCase , low + middle ):
comp_and_swap(__UpperCAmelCase , __UpperCAmelCase , i + middle , __UpperCAmelCase )
bitonic_merge(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
bitonic_merge(__UpperCAmelCase , low + middle , __UpperCAmelCase , __UpperCAmelCase )
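# note: bitonic sort only produces a correct result when the number of elements is a power of two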
def UpperCAmelCase_ ( __UpperCAmelCase : list[int] , __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : int ) -> None:
if length > 1:
SCREAMING_SNAKE_CASE_ = int(length / 2 )
bitonic_sort(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , 1 )
bitonic_sort(__UpperCAmelCase , low + middle , __UpperCAmelCase , 0 )
bitonic_merge(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
if __name__ == "__main__":
lowerCamelCase__ : Optional[Any] = input('Enter numbers separated by a comma:\n').strip()
lowerCamelCase__ : Tuple = [int(item.strip()) for item in user_input.split(',')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('\nSorted array in ascending order is: ', end='')
print(*unsorted, sep=', ')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('Sorted array in descending order is: ', end='')
print(*unsorted, sep=', ')
| 31
|
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
A : Tuple = logging.get_logger(__name__)
def snake_case__ ( _snake_case : Union[str, Any] , _snake_case : Dict=False ):
"""simple docstring"""
UpperCamelCase__ = OrderedDict()
for key, value in state_dict.items():
if encoder_only and not key.startswith("head" ):
UpperCamelCase__ = "segformer.encoder." + key
if key.startswith("backbone" ):
UpperCamelCase__ = key.replace("backbone" , "segformer.encoder" )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
UpperCamelCase__ = key[key.find("patch_embed" ) + len("patch_embed" )]
UpperCamelCase__ = key.replace(F'patch_embed{idx}' , F'patch_embeddings.{int(_snake_case )-1}' )
if "norm" in key:
UpperCamelCase__ = key.replace("norm" , "layer_norm" )
if "segformer.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
UpperCamelCase__ = key[key.find("segformer.encoder.layer_norm" ) + len("segformer.encoder.layer_norm" )]
UpperCamelCase__ = key.replace(F'layer_norm{idx}' , F'layer_norm.{int(_snake_case )-1}' )
if "layer_norm1" in key:
UpperCamelCase__ = key.replace("layer_norm1" , "layer_norm_1" )
if "layer_norm2" in key:
UpperCamelCase__ = key.replace("layer_norm2" , "layer_norm_2" )
if "block" in key:
# replace for example block1 by block.0
UpperCamelCase__ = key[key.find("block" ) + len("block" )]
UpperCamelCase__ = key.replace(F'block{idx}' , F'block.{int(_snake_case )-1}' )
if "attn.q" in key:
UpperCamelCase__ = key.replace("attn.q" , "attention.self.query" )
if "attn.proj" in key:
UpperCamelCase__ = key.replace("attn.proj" , "attention.output.dense" )
if "attn" in key:
UpperCamelCase__ = key.replace("attn" , "attention.self" )
if "fc1" in key:
UpperCamelCase__ = key.replace("fc1" , "dense1" )
if "fc2" in key:
UpperCamelCase__ = key.replace("fc2" , "dense2" )
if "linear_pred" in key:
UpperCamelCase__ = key.replace("linear_pred" , "classifier" )
if "linear_fuse" in key:
UpperCamelCase__ = key.replace("linear_fuse.conv" , "linear_fuse" )
UpperCamelCase__ = key.replace("linear_fuse.bn" , "batch_norm" )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
UpperCamelCase__ = key[key.find("linear_c" ) + len("linear_c" )]
UpperCamelCase__ = key.replace(F'linear_c{idx}' , F'linear_c.{int(_snake_case )-1}' )
if key.startswith("head" ):
UpperCamelCase__ = key.replace("head" , "classifier" )
UpperCamelCase__ = value
return new_state_dict
def snake_case__ ( _snake_case : str , _snake_case : List[Any] ):
"""simple docstring"""
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
UpperCamelCase__ = state_dict.pop(F'segformer.encoder.block.{i}.{j}.attention.self.kv.weight' )
UpperCamelCase__ = state_dict.pop(F'segformer.encoder.block.{i}.{j}.attention.self.kv.bias' )
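# kv_weight has shape (2 * hidden_sizes[i], hidden_sizes[i]): the first hidden_sizes[i]
# rows hold the key projection and the remaining rows hold the value projection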
# next, add keys and values (in that order) to the state dict
UpperCamelCase__ = kv_weight[
: config.hidden_sizes[i], :
]
UpperCamelCase__ = kv_bias[: config.hidden_sizes[i]]
UpperCamelCase__ = kv_weight[
config.hidden_sizes[i] :, :
]
UpperCamelCase__ = kv_bias[
config.hidden_sizes[i] :
]
def snake_case__ ( ):
"""simple docstring"""
UpperCamelCase__ = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCamelCase__ = Image.open(requests.get(_snake_case , stream=_snake_case ).raw )
return image
@torch.no_grad()
def snake_case__ ( _snake_case : Tuple , _snake_case : Optional[int] , _snake_case : List[Any] ):
"""simple docstring"""
UpperCamelCase__ = SegformerConfig()
UpperCamelCase__ = False
# set attributes based on model_name
UpperCamelCase__ = "huggingface/label-files"
if "segformer" in model_name:
UpperCamelCase__ = model_name[len("segformer." ) : len("segformer." ) + 2]
if "ade" in model_name:
UpperCamelCase__ = 1_50
UpperCamelCase__ = "ade20k-id2label.json"
UpperCamelCase__ = (1, 1_50, 1_28, 1_28)
elif "city" in model_name:
UpperCamelCase__ = 19
UpperCamelCase__ = "cityscapes-id2label.json"
UpperCamelCase__ = (1, 19, 1_28, 1_28)
else:
raise ValueError(F'Model {model_name} not supported' )
elif "mit" in model_name:
UpperCamelCase__ = True
UpperCamelCase__ = model_name[4:6]
UpperCamelCase__ = 10_00
UpperCamelCase__ = "imagenet-1k-id2label.json"
UpperCamelCase__ = (1, 10_00)
else:
raise ValueError(F'Model {model_name} not supported' )
# set config attributes
UpperCamelCase__ = json.load(open(hf_hub_download(_snake_case , _snake_case , repo_type="dataset" ) , "r" ) )
UpperCamelCase__ = {int(_snake_case ): v for k, v in idalabel.items()}
UpperCamelCase__ = idalabel
UpperCamelCase__ = {v: k for k, v in idalabel.items()}
if size == "b0":
pass
elif size == "b1":
UpperCamelCase__ = [64, 1_28, 3_20, 5_12]
UpperCamelCase__ = 2_56
elif size == "b2":
UpperCamelCase__ = [64, 1_28, 3_20, 5_12]
UpperCamelCase__ = 7_68
UpperCamelCase__ = [3, 4, 6, 3]
elif size == "b3":
UpperCamelCase__ = [64, 1_28, 3_20, 5_12]
UpperCamelCase__ = 7_68
UpperCamelCase__ = [3, 4, 18, 3]
elif size == "b4":
UpperCamelCase__ = [64, 1_28, 3_20, 5_12]
UpperCamelCase__ = 7_68
UpperCamelCase__ = [3, 8, 27, 3]
elif size == "b5":
UpperCamelCase__ = [64, 1_28, 3_20, 5_12]
UpperCamelCase__ = 7_68
UpperCamelCase__ = [3, 6, 40, 3]
else:
raise ValueError(F'Size {size} not supported' )
# load image processor (only resize + normalize)
UpperCamelCase__ = SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=_snake_case , align=_snake_case , do_random_crop=_snake_case )
# prepare image
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(images=_snake_case , return_tensors="pt" ).pixel_values
logger.info(F'Converting model {model_name}...' )
# load original state dict
if encoder_only:
UpperCamelCase__ = torch.load(_snake_case , map_location=torch.device("cpu" ) )
else:
UpperCamelCase__ = torch.load(_snake_case , map_location=torch.device("cpu" ) )["state_dict"]
# rename keys
UpperCamelCase__ = rename_keys(_snake_case , encoder_only=_snake_case )
if not encoder_only:
del state_dict["decode_head.conv_seg.weight"]
del state_dict["decode_head.conv_seg.bias"]
# key and value matrices need special treatment
read_in_k_v(_snake_case , _snake_case )
# create HuggingFace model and load state dict
if encoder_only:
UpperCamelCase__ = False
UpperCamelCase__ = SegformerForImageClassification(_snake_case )
else:
UpperCamelCase__ = SegformerForSemanticSegmentation(_snake_case )
model.load_state_dict(_snake_case )
model.eval()
# forward pass
UpperCamelCase__ = model(_snake_case )
UpperCamelCase__ = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
UpperCamelCase__ = torch.tensor(
[
[[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
[[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
[[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
UpperCamelCase__ = torch.tensor(
[
[[-7.5820, -8.7231, -8.3215], [-8.0600, -10.3529, -10.0304], [-7.5208, -9.4103, -9.6239]],
[[-12.6918, -13.8994, -13.7137], [-13.3196, -15.7523, -15.4789], [-12.9343, -14.8757, -14.9689]],
[[-11.1911, -11.9421, -11.3243], [-11.3342, -13.6839, -13.3581], [-10.3909, -12.1832, -12.4858]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
UpperCamelCase__ = torch.tensor(
[
[[-11.8173, -14.3850, -16.3128], [-14.5648, -16.5804, -18.6568], [-14.7223, -15.7387, -18.4218]],
[[-15.7290, -17.9171, -19.4423], [-18.3105, -19.9448, -21.4661], [-17.9296, -18.6497, -20.7910]],
[[-15.0783, -17.0336, -18.2789], [-16.8771, -18.6870, -20.1612], [-16.2454, -17.1426, -19.5055]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
UpperCamelCase__ = torch.tensor(
[
[[-9.0878, -10.2081, -10.1891], [-9.3144, -10.7941, -10.9843], [-9.2294, -10.3855, -10.5704]],
[[-12.2316, -13.9068, -13.6102], [-12.9161, -14.3702, -14.3235], [-12.5233, -13.7174, -13.7932]],
[[-14.6275, -15.2490, -14.9727], [-14.3400, -15.9687, -16.2827], [-14.1484, -15.4033, -15.8937]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
UpperCamelCase__ = torch.tensor(
[
[[-12.3144, -13.2447, -14.0802], [-13.3614, -14.5816, -15.6117], [-13.3340, -14.4433, -16.2219]],
[[-19.2781, -20.4128, -20.7506], [-20.6153, -21.6566, -22.0998], [-19.9800, -21.0430, -22.1494]],
[[-18.8739, -19.7804, -21.1834], [-20.1233, -21.6765, -23.2944], [-20.0315, -21.2641, -23.6944]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
UpperCamelCase__ = torch.tensor(
[
[[-9.5524, -12.0835, -11.7348], [-10.5229, -13.6446, -14.5662], [-9.5842, -12.8851, -13.9414]],
[[-15.3432, -17.5323, -17.0818], [-16.3330, -18.9255, -19.2101], [-15.1340, -17.7848, -18.3971]],
[[-12.6072, -14.9486, -14.6631], [-13.7629, -17.0907, -17.7745], [-12.7899, -16.1695, -17.1671]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
UpperCamelCase__ = torch.tensor(
[
[[-11.9295, -13.4057, -14.8106], [-13.3431, -14.8179, -15.3781], [-14.2836, -15.5942, -16.1588]],
[[-11.4906, -12.8067, -13.6564], [-13.1189, -14.0500, -14.1543], [-13.8748, -14.5136, -14.8789]],
[[0.5374, 0.1067, -0.4742], [0.1141, -0.2255, -0.7099], [-0.3000, -0.5924, -1.3105]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
UpperCamelCase__ = torch.tensor(
[
[[-7.8217, -9.8767, -10.1717], [-9.4438, -10.9058, -11.4047], [-9.7939, -12.3495, -12.1079]],
[[-7.1514, -9.5336, -10.0860], [-9.7776, -11.6822, -11.8439], [-10.1411, -12.7655, -12.8972]],
[[0.3021, 0.0805, -0.2310], [-0.0328, -0.1605, -0.2714], [-0.1408, -0.5477, -0.6976]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
UpperCamelCase__ = torch.tensor(
[
[
[-1.1_372E01, -1.2_787E01, -1.3_477E01],
[-1.2_536E01, -1.4_194E01, -1.4_409E01],
[-1.3_217E01, -1.4_888E01, -1.5_327E01],
],
[
[-1.4_791E01, -1.7_122E01, -1.8_277E01],
[-1.7_163E01, -1.9_192E01, -1.9_533E01],
[-1.7_897E01, -1.9_991E01, -2.0_315E01],
],
[
[7.6_723E-01, 4.1_921E-01, -7.7_878E-02],
[4.7_772E-01, 9.5_557E-03, -2.8_082E-01],
[3.6_032E-01, -2.4_826E-01, -5.1_168E-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
UpperCamelCase__ = torch.tensor(
[
[[-9.4959, -11.3087, -11.7479], [-11.0025, -12.6540, -12.3319], [-11.4064, -13.0487, -12.9905]],
[[-9.8905, -11.3084, -12.0854], [-11.1726, -12.7698, -12.9583], [-11.5985, -13.3278, -14.1774]],
[[0.2213, 0.0192, -0.2466], [-0.1731, -0.4213, -0.4874], [-0.3126, -0.6541, -1.1389]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
UpperCamelCase__ = torch.tensor(
[
[[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
[[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
[[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
UpperCamelCase__ = torch.tensor(
[
[[-16.0976, -16.4856, -17.3962], [-16.6234, -19.0342, -19.7685], [-16.0900, -18.0661, -19.1180]],
[[-18.4750, -18.8488, -19.5074], [-19.4030, -22.1570, -22.5977], [-19.1191, -20.8486, -22.3783]],
[[-4.5178, -5.5037, -6.5109], [-5.0884, -7.2174, -8.0334], [-4.4156, -5.8117, -7.2970]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
UpperCamelCase__ = torch.tensor(
[
[[-14.2081, -14.4732, -14.1977], [-14.5867, -16.4423, -16.6356], [-13.4441, -14.9685, -16.8696]],
[[-14.4576, -14.7073, -15.0451], [-15.0816, -17.6237, -17.9873], [-14.4213, -16.0199, -18.5992]],
[[-4.7349, -4.9588, -5.0966], [-4.3210, -6.9325, -7.2591], [-3.4312, -4.7484, -7.1917]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
UpperCamelCase__ = torch.tensor(
[
[[-11.7737, -11.9526, -11.3273], [-13.6692, -14.4574, -13.8878], [-13.8937, -14.6924, -15.9345]],
[[-14.6706, -14.5330, -14.1306], [-16.1502, -16.8180, -16.4269], [-16.8338, -17.8939, -20.1746]],
[[1.0491, 0.8289, 1.0310], [1.1044, 0.5219, 0.8055], [1.0899, 0.6926, 0.5590]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
UpperCamelCase__ = torch.tensor(
[
[[-12.5641, -13.4777, -13.0684], [-13.9587, -15.8983, -16.6557], [-13.3109, -15.7350, -16.3141]],
[[-14.7074, -15.4352, -14.5944], [-16.6353, -18.1663, -18.6120], [-15.1702, -18.0329, -18.1547]],
[[-1.7990, -2.0951, -1.7784], [-2.6397, -3.8245, -3.9686], [-1.5264, -2.8126, -2.9316]],
] )
else:
UpperCamelCase__ = logits.argmax(-1 ).item()
print("Predicted class:" , model.config.idalabel[predicted_class_idx] )
# verify logits
if not encoder_only:
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3, :3, :3] , _snake_case , atol=1E-2 )
# finally, save model and image processor
logger.info(F'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
Path(_snake_case ).mkdir(exist_ok=_snake_case )
model.save_pretrained(_snake_case )
image_processor.save_pretrained(_snake_case )
if __name__ == "__main__":
A : Dict = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='segformer.b0.512x512.ade.160k',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
A : Dict = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 516
| 0
|
'''simple docstring'''
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def _UpperCamelCase ( UpperCamelCase__ ):
UpperCAmelCase__ : Dict = model.config
UpperCAmelCase__ : Optional[Any] = DonutSwinConfig(
image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 1_6, 3_2] , window_size=original_config.window_size , embed_dim=1_2_8 , )
UpperCAmelCase__ : str = MBartConfig(
is_decoder=UpperCamelCase__ , is_encoder_decoder=UpperCamelCase__ , add_cross_attention=UpperCamelCase__ , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
model.decoder.tokenizer ) , scale_embedding=UpperCamelCase__ , add_final_layer_norm=UpperCamelCase__ , )
return encoder_config, decoder_config
def _UpperCamelCase ( UpperCamelCase__ ):
if "encoder.model" in name:
UpperCAmelCase__ : Dict = name.replace("""encoder.model""" , """encoder""" )
if "decoder.model" in name:
UpperCAmelCase__ : Tuple = name.replace("""decoder.model""" , """decoder""" )
if "patch_embed.proj" in name:
UpperCAmelCase__ : Any = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
UpperCAmelCase__ : Any = name.replace("""patch_embed.norm""" , """embeddings.norm""" )
if name.startswith("""encoder""" ):
if "layers" in name:
UpperCAmelCase__ : Optional[int] = """encoder.""" + name
if "attn.proj" in name:
UpperCAmelCase__ : int = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name and "mask" not in name:
UpperCAmelCase__ : str = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
UpperCAmelCase__ : Dict = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
UpperCAmelCase__ : str = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
UpperCAmelCase__ : List[Any] = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
UpperCAmelCase__ : List[Any] = name.replace("""mlp.fc2""" , """output.dense""" )
if name == "encoder.norm.weight":
UpperCAmelCase__ : Optional[Any] = """encoder.layernorm.weight"""
if name == "encoder.norm.bias":
UpperCAmelCase__ : Tuple = """encoder.layernorm.bias"""
return name
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
for key in orig_state_dict.copy().keys():
UpperCAmelCase__ : Optional[Any] = orig_state_dict.pop(UpperCamelCase__ )
if "qkv" in key:
UpperCAmelCase__ : Optional[Any] = key.split(""".""" )
UpperCAmelCase__ : Optional[Any] = int(key_split[3] )
UpperCAmelCase__ : Optional[Any] = int(key_split[5] )
UpperCAmelCase__ : List[str] = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
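# the fused qkv matrix stacks the query, key and value projections along dim 0,
# each occupying a block of all_head_size rows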
if "weight" in key:
UpperCAmelCase__ : Any = val[:dim, :]
UpperCAmelCase__ : int = val[dim : dim * 2, :]
UpperCAmelCase__ : Dict = val[-dim:, :]
else:
UpperCAmelCase__ : Tuple = val[:dim]
UpperCAmelCase__ : Union[str, Any] = val[dim : dim * 2]
UpperCAmelCase__ : List[str] = val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# HuggingFace implementation doesn't use attn_mask buffer
# and model doesn't use final LayerNorms for the encoder
pass
else:
UpperCAmelCase__ : List[str] = val
return orig_state_dict
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=False ):
# load original model
UpperCAmelCase__ : Dict = DonutModel.from_pretrained(UpperCamelCase__ ).eval()
# load HuggingFace model
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = get_configs(UpperCamelCase__ )
UpperCAmelCase__ : Any = DonutSwinModel(UpperCamelCase__ )
UpperCAmelCase__ : str = MBartForCausalLM(UpperCamelCase__ )
UpperCAmelCase__ : Any = VisionEncoderDecoderModel(encoder=UpperCamelCase__ , decoder=UpperCamelCase__ )
model.eval()
UpperCAmelCase__ : Any = original_model.state_dict()
UpperCAmelCase__ : Optional[Any] = convert_state_dict(UpperCamelCase__ , UpperCamelCase__ )
model.load_state_dict(UpperCamelCase__ )
# verify results on scanned document
UpperCAmelCase__ : Tuple = load_dataset("""hf-internal-testing/example-documents""" )
UpperCAmelCase__ : Union[str, Any] = dataset["""test"""][0]["""image"""].convert("""RGB""" )
UpperCAmelCase__ : List[Any] = XLMRobertaTokenizerFast.from_pretrained(UpperCamelCase__ , from_slow=UpperCamelCase__ )
UpperCAmelCase__ : List[str] = DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
UpperCAmelCase__ : List[str] = DonutProcessor(UpperCamelCase__ , UpperCamelCase__ )
UpperCAmelCase__ : int = processor(UpperCamelCase__ , return_tensors="""pt""" ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
UpperCAmelCase__ : List[str] = """<s_docvqa><s_question>{user_input}</s_question><s_answer>"""
UpperCAmelCase__ : Any = """When is the coffee break?"""
UpperCAmelCase__ : Union[str, Any] = task_prompt.replace("""{user_input}""" , UpperCamelCase__ )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
UpperCAmelCase__ : Optional[Any] = """<s_rvlcdip>"""
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
UpperCAmelCase__ : int = """<s_cord>"""
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
UpperCAmelCase__ : Optional[Any] = """s_cord-v2>"""
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
UpperCAmelCase__ : Dict = """<s_zhtrainticket>"""
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
UpperCAmelCase__ : Union[str, Any] = """hello world"""
else:
raise ValueError("""Model name not supported""" )
UpperCAmelCase__ : List[Any] = original_model.decoder.tokenizer(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , return_tensors="""pt""" )[
"""input_ids"""
]
UpperCAmelCase__ : List[Any] = original_model.encoder.model.patch_embed(UpperCamelCase__ )
UpperCAmelCase__ , UpperCAmelCase__ : str = model.encoder.embeddings(UpperCamelCase__ )
assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 )
# verify encoder hidden states
UpperCAmelCase__ : str = original_model.encoder(UpperCamelCase__ )
UpperCAmelCase__ : Any = model.encoder(UpperCamelCase__ ).last_hidden_state
assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-2 )
# verify decoder hidden states
UpperCAmelCase__ : Tuple = original_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).logits
UpperCAmelCase__ : Tuple = model(UpperCamelCase__ , decoder_input_ids=UpperCamelCase__ ).logits
assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model and processor to {pytorch_dump_folder_path}''' )
model.save_pretrained(UpperCamelCase__ )
processor.save_pretrained(UpperCamelCase__ )
if push_to_hub:
model.push_to_hub("""nielsr/""" + model_name.split("""/""" )[-1] , commit_message="""Update model""" )
processor.push_to_hub("""nielsr/""" + model_name.split("""/""" )[-1] , commit_message="""Update model""" )
if __name__ == "__main__":
__A =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='naver-clova-ix/donut-base-finetuned-docvqa',
required=False,
type=str,
help='Name of the original model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
required=False,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub.',
)
__A =parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 113
|
'''simple docstring'''
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class _snake_case ( a__ ):
def __init__( self , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=0):
UpperCAmelCase__ : Dict = 1.0 if scale is None else scale
UpperCAmelCase__ : Dict = 0.0 if loc is None else loc
super().__init__(_lowerCamelCase , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=_lowerCamelCase)])
@property
def snake_case__ ( self):
return self.base_dist.mean * self.scale + self.loc
@property
def snake_case__ ( self):
return self.base_dist.variance * self.scale**2
@property
def snake_case__ ( self):
return self.variance.sqrt()
class _snake_case ( nn.Module ):
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase):
super().__init__(**_lowerCamelCase)
UpperCAmelCase__ : int = args_dim
UpperCAmelCase__ : Optional[int] = nn.ModuleList([nn.Linear(_lowerCamelCase , _lowerCamelCase) for dim in args_dim.values()])
UpperCAmelCase__ : Optional[Any] = domain_map
def snake_case__ ( self , _lowerCamelCase):
UpperCAmelCase__ : Optional[int] = [proj(_lowerCamelCase) for proj in self.proj]
return self.domain_map(*_lowerCamelCase)
class _snake_case ( nn.Module ):
def __init__( self , _lowerCamelCase):
super().__init__()
UpperCAmelCase__ : Optional[int] = function
def snake_case__ ( self , _lowerCamelCase , *_lowerCamelCase):
return self.function(_lowerCamelCase , *_lowerCamelCase)
class _snake_case :
lowerCAmelCase :type
lowerCAmelCase :int
lowerCAmelCase :Dict[str, int]
def __init__( self , _lowerCamelCase = 1):
UpperCAmelCase__ : Optional[Any] = dim
UpperCAmelCase__ : int = {k: dim * self.args_dim[k] for k in self.args_dim}
def snake_case__ ( self , _lowerCamelCase):
if self.dim == 1:
return self.distribution_class(*_lowerCamelCase)
else:
return Independent(self.distribution_class(*_lowerCamelCase) , 1)
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , ):
UpperCAmelCase__ : Dict = self._base_distribution(_lowerCamelCase)
if loc is None and scale is None:
return distr
else:
return AffineTransformed(_lowerCamelCase , loc=_lowerCamelCase , scale=_lowerCamelCase , event_dim=self.event_dim)
@property
def snake_case__ ( self):
return () if self.dim == 1 else (self.dim,)
@property
def snake_case__ ( self):
return len(self.event_shape)
@property
def snake_case__ ( self):
return 0.0
def snake_case__ ( self , _lowerCamelCase):
return ParameterProjection(
in_features=_lowerCamelCase , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map) , )
def snake_case__ ( self , *_lowerCamelCase):
raise NotImplementedError()
@staticmethod
def snake_case__ ( _lowerCamelCase):
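# squareplus: a smooth algebraic alternative to softplus that maps any real input
# to a strictly positive value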
return (x + torch.sqrt(torch.square(_lowerCamelCase) + 4.0)) / 2.0
class _snake_case ( a__ ):
lowerCAmelCase :Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
lowerCAmelCase :type = StudentT
@classmethod
def snake_case__ ( cls , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase):
UpperCAmelCase__ : Optional[Any] = cls.squareplus(_lowerCamelCase).clamp_min(torch.finfo(scale.dtype).eps)
UpperCAmelCase__ : Any = 2.0 + cls.squareplus(_lowerCamelCase)
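# shifting by 2.0 keeps df > 2, so the resulting Student's t distribution has finite variance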
return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)
class _snake_case ( a__ ):
lowerCAmelCase :Dict[str, int] = {"loc": 1, "scale": 1}
lowerCAmelCase :type = Normal
@classmethod
def snake_case__ ( cls , _lowerCamelCase , _lowerCamelCase):
UpperCAmelCase__ : str = cls.squareplus(_lowerCamelCase).clamp_min(torch.finfo(scale.dtype).eps)
return loc.squeeze(-1), scale.squeeze(-1)
class _snake_case ( a__ ):
lowerCAmelCase :Dict[str, int] = {"total_count": 1, "logits": 1}
lowerCAmelCase :type = NegativeBinomial
@classmethod
def snake_case__ ( cls , _lowerCamelCase , _lowerCamelCase):
UpperCAmelCase__ : str = cls.squareplus(_lowerCamelCase)
return total_count.squeeze(-1), logits.squeeze(-1)
def snake_case__ ( self , _lowerCamelCase):
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = distr_args
if self.dim == 1:
return self.distribution_class(total_count=_lowerCamelCase , logits=_lowerCamelCase)
else:
return Independent(self.distribution_class(total_count=_lowerCamelCase , logits=_lowerCamelCase) , 1)
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None):
UpperCAmelCase__ , UpperCAmelCase__ : Any = distr_args
if scale is not None:
# See scaling property of Gamma.
logits += scale.log()
return self._base_distribution((total_count, logits))
| 113
| 1
|
"""simple docstring"""
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
__UpperCAmelCase : int = 5_00_00
__UpperCAmelCase : Optional[int] = 50_00
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = os.path.split(__file__)
__UpperCAmelCase : Optional[Any] = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json'))
@get_duration
def A ( _A, _A ):
"""simple docstring"""
for i in range(_SCREAMING_SNAKE_CASE ):
snake_case_ :Tuple = dataset[i]
@get_duration
def A ( _A, _A, _A ):
"""simple docstring"""
for i in range(0, len(_SCREAMING_SNAKE_CASE ), _SCREAMING_SNAKE_CASE ):
snake_case_ :List[Any] = dataset[i : i + batch_size]
@get_duration
def A ( _A, _A, _A ):
"""simple docstring"""
with dataset.formatted_as(type=_SCREAMING_SNAKE_CASE ):
for i in range(_SCREAMING_SNAKE_CASE ):
snake_case_ :Optional[Any] = dataset[i]
@get_duration
def A ( _A, _A, _A, _A ):
"""simple docstring"""
with dataset.formatted_as(type=_SCREAMING_SNAKE_CASE ):
for i in range(0, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE ):
snake_case_ :List[Any] = dataset[i : i + batch_size]
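# the four @get_duration functions above time plain indexing, batched slicing, and the
# same two access patterns again under a specific output format via dataset.formatted_as(...)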
def A ( ):
"""simple docstring"""
snake_case_ :Optional[int] = {"num examples": SPEED_TEST_N_EXAMPLES}
snake_case_ :Optional[int] = [
(read, {"length": SMALL_TEST}),
(read, {"length": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
(read_formatted, {"type": "numpy", "length": SMALL_TEST}),
(read_formatted, {"type": "pandas", "length": SMALL_TEST}),
(read_formatted, {"type": "torch", "length": SMALL_TEST}),
(read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
]
snake_case_ :Optional[int] = [
(read, {"length": SMALL_TEST}),
(read, {"length": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
(read_formatted, {"type": "numpy", "length": SMALL_TEST}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
]
with tempfile.TemporaryDirectory() as tmp_dir:
print("generating dataset" )
snake_case_ :Any = datasets.Features(
{"list": datasets.Sequence(datasets.Value("float32" ) ), "numbers": datasets.Value("float32" )} )
snake_case_ :Optional[int] = generate_example_dataset(
os.path.join(_SCREAMING_SNAKE_CASE, "dataset.arrow" ), _SCREAMING_SNAKE_CASE, num_examples=_SCREAMING_SNAKE_CASE, seq_shapes={"list": (100,)}, )
print("first set of iterations" )
for func, kwargs in functions:
print(func.__name__, str(_SCREAMING_SNAKE_CASE ) )
snake_case_ :Any = func(_SCREAMING_SNAKE_CASE, **_SCREAMING_SNAKE_CASE )
print("shuffling dataset" )
snake_case_ :List[Any] = dataset.shuffle()
print("Second set of iterations (after shuffling" )
for func, kwargs in functions_shuffled:
print("shuffled ", func.__name__, str(_SCREAMING_SNAKE_CASE ) )
snake_case_ :Tuple = func(
_SCREAMING_SNAKE_CASE, **_SCREAMING_SNAKE_CASE )
with open(_SCREAMING_SNAKE_CASE, "wb" ) as f:
f.write(json.dumps(_SCREAMING_SNAKE_CASE ).encode("utf-8" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 584
|
"""simple docstring"""
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class UpperCAmelCase__ ( __lowerCamelCase, unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ : Tuple = PriorTransformer
lowerCAmelCase__ : int = """hidden_states"""
@property
def A ( self ) -> Tuple:
a_ : Union[str, Any] = 4
a_ : Tuple = 8
a_ : Dict = 7
a_ : Dict = floats_tensor((batch_size, embedding_dim) ).to(_SCREAMING_SNAKE_CASE )
a_ : Tuple = floats_tensor((batch_size, embedding_dim) ).to(_SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(_SCREAMING_SNAKE_CASE )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def A ( self , _SCREAMING_SNAKE_CASE=0 ) -> int:
torch.manual_seed(_SCREAMING_SNAKE_CASE )
a_ : int = 4
a_ : Dict = 8
a_ : Dict = 7
a_ : Dict = torch.randn((batch_size, embedding_dim) ).to(_SCREAMING_SNAKE_CASE )
a_ : Tuple = torch.randn((batch_size, embedding_dim) ).to(_SCREAMING_SNAKE_CASE )
a_ : List[Any] = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_SCREAMING_SNAKE_CASE )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
@property
def A ( self ) -> Optional[Any]:
return (4, 8)
@property
def A ( self ) -> Any:
return (4, 8)
def A ( self ) -> List[str]:
a_ : List[Any] = {
"num_attention_heads": 2,
"attention_head_dim": 4,
"num_layers": 2,
"embedding_dim": 8,
"num_embeddings": 7,
"additional_embeddings": 4,
}
a_ : Optional[int] = self.dummy_input
return init_dict, inputs_dict
def A ( self ) -> Dict:
a_ , a_ : str = PriorTransformer.from_pretrained(
"hf-internal-testing/prior-dummy" , output_loading_info=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
model.to(_SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = model(**self.dummy_input )[0]
assert hidden_states is not None, "Make sure output is not None"
def A ( self ) -> str:
a_ , a_ : str = self.prepare_init_args_and_inputs_for_common()
a_ : List[Any] = self.model_class(**_SCREAMING_SNAKE_CASE )
a_ : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a_ : List[Any] = [*signature.parameters.keys()]
a_ : Any = ["hidden_states", "timestep"]
self.assertListEqual(arg_names[:2] , _SCREAMING_SNAKE_CASE )
def A ( self ) -> Any:
a_ : Tuple = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy" )
a_ : Union[str, Any] = model.to(_SCREAMING_SNAKE_CASE )
if hasattr(_SCREAMING_SNAKE_CASE , "set_default_attn_processor" ):
model.set_default_attn_processor()
a_ : List[Any] = self.get_dummy_seed_input()
with torch.no_grad():
a_ : Dict = model(**_SCREAMING_SNAKE_CASE )[0]
a_ : Any = output[0, :5].flatten().cpu()
print(_SCREAMING_SNAKE_CASE )
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
a_ : Optional[int] = torch.tensor([-1.3_4_3_6, -0.2_8_7_0, 0.7_5_3_8, 0.4_3_6_8, -0.0_2_3_9] )
self.assertTrue(torch_all_close(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , rtol=1E-2 ) )
@slow
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def A ( self , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=7_6_8 , _SCREAMING_SNAKE_CASE=7_7 , _SCREAMING_SNAKE_CASE=0 ) -> Optional[int]:
torch.manual_seed(_SCREAMING_SNAKE_CASE )
a_ : List[str] = batch_size
a_ : Optional[Any] = embedding_dim
a_ : Tuple = num_embeddings
a_ : List[Any] = torch.randn((batch_size, embedding_dim) ).to(_SCREAMING_SNAKE_CASE )
a_ : List[Any] = torch.randn((batch_size, embedding_dim) ).to(_SCREAMING_SNAKE_CASE )
a_ : int = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_SCREAMING_SNAKE_CASE )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def A ( self ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[1_3, [-0.5_8_6_1, 0.1_2_8_3, -0.0_9_3_1, 0.0_8_8_2, 0.4_4_7_6, 0.1_3_2_9, -0.0_4_9_8, 0.0_6_4_0]],
[3_7, [-0.4_9_1_3, 0.0_1_1_0, -0.0_4_8_3, 0.0_5_4_1, 0.4_9_5_4, -0.0_1_7_0, 0.0_3_5_4, 0.1_6_5_1]],
# fmt: on
] )
def A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
a_ : List[Any] = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior" , subfolder="prior" )
model.to(_SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = self.get_dummy_seed_input(seed=_SCREAMING_SNAKE_CASE )
with torch.no_grad():
a_ : int = model(**_SCREAMING_SNAKE_CASE )[0]
assert list(sample.shape ) == [1, 7_6_8]
a_ : str = sample[0, :8].flatten().cpu()
print(_SCREAMING_SNAKE_CASE )
a_ : int = torch.tensor(_SCREAMING_SNAKE_CASE )
assert torch_all_close(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 )
| 473
| 0
|
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
__lowerCamelCase : List[str] = logging.get_logger(__name__)
@dataclass
class UpperCAmelCase ( _lowercase ):
UpperCAmelCase : Any = [
'''no_inference''',
'''no_cuda''',
'''no_tpu''',
'''no_speed''',
'''no_memory''',
'''no_env_print''',
'''no_multi_process''',
]
def __init__(self : int , **A__ : Optional[Any] ) -> Dict:
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
lowercase = deprecated_arg[3:]
setattr(self , A__ , not kwargs.pop(A__ ) )
logger.warning(
f'{deprecated_arg} is deprecated. Please use --no_{positive_arg} or'
f' {positive_arg}={kwargs[positive_arg]}' )
lowercase = kwargs.pop("torchscript" , self.torchscript )
lowercase = kwargs.pop("torch_xla_tpu_print_metrics" , self.torch_xla_tpu_print_metrics )
lowercase = kwargs.pop("fp16_opt_level" , self.fpaa_opt_level )
super().__init__(**A__ )
UpperCAmelCase : bool = field(default=_lowercase , metadata={'''help''': '''Trace the models using torchscript'''} )
UpperCAmelCase : bool = field(default=_lowercase , metadata={'''help''': '''Print Xla/PyTorch tpu metrics'''} )
UpperCAmelCase : str = field(
default='''O1''' , metadata={
'''help''': (
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. '''
'''See details at https://nvidia.github.io/apex/amp.html'''
)
} , )
@cached_property
def UpperCAmelCase__ (self : str ) -> Tuple["torch.device", int]:
requires_backends(self , ["torch"] )
logger.info("PyTorch: setting up devices" )
if not self.cuda:
lowercase = torch.device("cpu" )
lowercase = 0
elif is_torch_tpu_available():
lowercase = xm.xla_device()
lowercase = 0
else:
lowercase = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
lowercase = torch.cuda.device_count()
return device, n_gpu
@property
def UpperCAmelCase__ (self : int ) -> List[str]:
return is_torch_tpu_available() and self.tpu
@property
def UpperCAmelCase__ (self : List[str] ) -> int:
requires_backends(self , ["torch"] )
# TODO(PVP): currently only single GPU is supported
return torch.cuda.current_device()
@property
def UpperCAmelCase__ (self : Dict ) -> "torch.device":
requires_backends(self , ["torch"] )
return self._setup_devices[0]
@property
def UpperCAmelCase__ (self : Dict ) -> int:
requires_backends(self , ["torch"] )
return self._setup_devices[1]
@property
def UpperCAmelCase__ (self : Any ) -> Optional[int]:
return self.n_gpu > 0
| 459
|
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class UpperCAmelCase ( _lowercase ):
UpperCAmelCase : Optional[Any] = '''MCTCTFeatureExtractor'''
UpperCAmelCase : Tuple = '''AutoTokenizer'''
def __init__(self : int , A__ : Tuple , A__ : Union[str, Any] ) -> Dict:
super().__init__(A__ , A__ )
lowercase = self.feature_extractor
lowercase = False
def __call__(self : Tuple , *A__ : str , **A__ : Dict ) -> int:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*A__ , **A__ )
if "raw_speech" in kwargs:
warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." )
lowercase = kwargs.pop("raw_speech" )
else:
lowercase = kwargs.pop("audio" , A__ )
lowercase = kwargs.pop("sampling_rate" , A__ )
lowercase = kwargs.pop("text" , A__ )
if len(A__ ) > 0:
lowercase = args[0]
lowercase = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process." )
if audio is not None:
lowercase = self.feature_extractor(A__ , *A__ , sampling_rate=A__ , **A__ )
if text is not None:
lowercase = self.tokenizer(A__ , **A__ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
lowercase = encodings["input_ids"]
return inputs
def UpperCAmelCase__ (self : Tuple , *A__ : str , **A__ : str ) -> str:
return self.tokenizer.batch_decode(*A__ , **A__ )
def UpperCAmelCase__ (self : Any , *A__ : List[Any] , **A__ : List[str] ) -> Tuple:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor.pad(*A__ , **A__ )
lowercase = kwargs.pop("input_features" , A__ )
lowercase = kwargs.pop("labels" , A__ )
if len(A__ ) > 0:
lowercase = args[0]
lowercase = args[1:]
if input_features is not None:
lowercase = self.feature_extractor.pad(A__ , *A__ , **A__ )
if labels is not None:
lowercase = self.tokenizer.pad(A__ , **A__ )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
lowercase = labels["input_ids"]
return input_features
def UpperCAmelCase__ (self : Tuple , *A__ : Optional[int] , **A__ : Optional[int] ) -> Tuple:
return self.tokenizer.decode(*A__ , **A__ )
    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call.)"
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
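# ---------------------------------------------------------------------------
# Hedged usage sketch (not in the original snippet). The checkpoint name and
# the shape of `audio` are illustrative assumptions, not guaranteed by this
# file.
#
# from transformers import MCTCTProcessor
# import numpy as np
#
# processor = MCTCTProcessor.from_pretrained("speechbrain/m-ctc-t-large")
# audio = np.zeros(16000, dtype=np.float32)          # 1 s of silence at 16 kHz
# inputs = processor(audio=audio, sampling_rate=16000, return_tensors="pt")
# batch = processor(audio=audio, text="hello", sampling_rate=16000)  # adds `labels`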
| 459
| 1
|
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class YolosModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=[30, 30],
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        n_targets=8,
        num_detection_tokens=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.n_targets = n_targets
        self.num_detection_tokens = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        self.expected_seq_len = num_patches + 1 + self.num_detection_tokens
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])
        labels = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            labels = []
            for i in range(self.batch_size):
                target = {}
                target["class_labels"] = torch.randint(
                    high=self.num_labels, size=(self.n_targets,), device=torch_device
                )
                target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device)
                labels.append(target)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return YolosConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            num_detection_tokens=self.num_detection_tokens,
            num_labels=self.num_labels,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = YolosModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size)
        )
    def create_and_check_for_object_detection(self, config, pixel_values, labels):
        model = YolosForObjectDetection(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values=pixel_values)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))
        result = model(pixel_values=pixel_values, labels=labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class YolosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
    )
    # NOTE: the original names of the four boolean test flags below were lost;
    # these names are assumptions based on similar model test files.
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "YolosForObjectDetection":
                labels = []
                for i in range(self.model_tester.batch_size):
                    target = {}
                    target["class_labels"] = torch.ones(
                        size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long
                    )
                    target["boxes"] = torch.ones(
                        self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float
                    )
                    labels.append(target)
                inputs_dict["labels"] = labels
        return inputs_dict
    def setUp(self):
        self.model_tester = YolosModelTester(self)
        self.config_tester = ConfigTester(self, config_class=YolosConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_inputs_embeds(self):
        # YOLOS does not use inputs_embeds
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        # in YOLOS, the seq_len is different
        seq_len = self.model_tester.expected_seq_len
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )
            out_len = len(outputs)
            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))
            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)
            # YOLOS has a different seq_length
            seq_length = self.model_tester.expected_seq_len
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_object_detection(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_object_detection(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = YolosModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class YolosModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("hustvl/yolos-small") if is_vision_available() else None
    @slow
    def test_inference_object_detection_head(self):
        model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(inputs.pixel_values)
        # verify outputs
        expected_shape = torch.Size((1, 100, 92))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]],
            device=torch_device,
        )
        expected_slice_boxes = torch.tensor(
            [[0.2_559, 0.5_455, 0.4_706], [0.2_989, 0.7_279, 0.1_875], [0.7_732, 0.4_017, 0.4_462]], device=torch_device
        )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4))
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))
        # verify postprocessing
        results = image_processor.post_process_object_detection(
            outputs, threshold=0.3, target_sizes=[image.size[::-1]]
        )[0]
        expected_scores = torch.tensor([0.9_994, 0.9_790, 0.9_964, 0.9_972, 0.9_861]).to(torch_device)
        expected_labels = [75, 75, 17, 63, 17]
        expected_slice_boxes = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(torch_device)
        self.assertEqual(len(results["scores"]), 5)
        self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4))
        self.assertSequenceEqual(results["labels"].tolist(), expected_labels)
        self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes))
| 233
|
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
import sqlite3
import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(
        self,
        sql: Union[str, "sqlalchemy.sql.Selectable"],
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        **kwargs,
    ):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(
            cache_dir=cache_dir,
            features=features,
            sql=sql,
            con=con,
            **kwargs,
        )
    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None
        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )
        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset
class SqlDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        name: str,
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_sql_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")
        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs
    def write(self) -> int:
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)
        written = self._write(index=index, **self.to_sql_kwargs)
        return written
    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)
    def _write(self, index, **to_sql_kwargs) -> int:
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows_batch in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += num_rows_batch
        return written
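# ---------------------------------------------------------------------------
# Hedged usage sketch (not in the original snippet), assuming an in-memory
# sqlite3 connection; the table and column names are illustrative.
#
# import sqlite3
# from datasets import Dataset
#
# con = sqlite3.connect(":memory:")
# ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
# SqlDatasetWriter(ds, "my_table", con).write()                            # Arrow -> SQL
# round_tripped = SqlDatasetReader("SELECT * FROM my_table", con).read()   # SQL -> Arrow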
| 176
| 0
|
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset() -> Dataset:
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset
class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)
    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
| 685
|
from ..utils import DummyObject, requires_backends
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Tuple = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : List[str] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : List[str] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Union[str, Any] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : str = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Optional[int] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Any = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : str = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Union[str, Any] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : List[Any] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Dict = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Optional[int] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : str = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
| 685
| 1
|
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
UpperCAmelCase : List[str] = logging.get_logger(__name__)
class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
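# ---------------------------------------------------------------------------
# Hedged migration sketch (not in the original snippet): the drop-in
# replacement suggested by the warning above. The checkpoint name is an
# illustrative assumption.
#
# from transformers import PoolFormerImageProcessor
# image_processor = PoolFormerImageProcessor.from_pretrained("sail/poolformer_s12")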
| 239
|
import numpy as np
def runge_kutta(f, ya, xa, x_end, h):
    """Integrate y' = f(x, y) from (xa, ya) up to x_end with fixed step h
    using the classical fourth-order Runge-Kutta method."""
    n = int(np.ceil((x_end - xa) / h))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa
    for k in range(n):
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
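# ---------------------------------------------------------------------------
# Hedged usage sketch (not in the original snippet): integrate y' = y with
# y(0) = 1 over [0, 1]; the final value should approximate e.
#
# y = runge_kutta(lambda x, y: y, ya=1.0, xa=0.0, x_end=1.0, h=0.01)
# print(y[-1])  # ~2.7182, close to np.e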
| 239
| 1
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
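# ---------------------------------------------------------------------------
# Hedged usage sketch (not in the original snippet). The checkpoint name is an
# illustrative assumption, and `image` stands for any PIL image.
#
# from transformers import ChineseCLIPProcessor
# processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
# batch = processor(text=["一只猫"], images=image, return_tensors="pt")
# # batch now holds `input_ids`, `attention_mask` and `pixel_values`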
| 710
|
import numpy
class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array, output_array):
        self.input_array = input_array
        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.
        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )
        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)
        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)
        # Real output values provided.
        self.output_array = output_array
        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)
    def feedforward(self):
        # layer_between_input_and_first_hidden_layer connects the input nodes
        # with the first hidden set of nodes.
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )
        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return self.layer_between_second_hidden_layer_and_output
    def back_propagation(self):
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )
        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )
    def train(self, output, iterations, give_loss):
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")
    def predict(self, input_arr):
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return int(self.layer_between_second_hidden_layer_and_output > 0.6)
def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    return 1 / (1 + numpy.exp(-value))
def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    return (value) * (1 - (value))
def example() -> int:
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )
    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)
    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(
        input_array=test_input, output_array=output
    )
    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))
if __name__ == "__main__":
example()
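# ---------------------------------------------------------------------------
# Hedged sanity check (not in the original snippet): two closed-form values of
# the activation used above. sigmoid(0) = 0.5 and, since sigmoid_derivative
# takes an already-activated value, sigmoid_derivative(0.5) = 0.25.
#
# assert abs(sigmoid(numpy.array(0.0)) - 0.5) < 1e-12
# assert abs(sigmoid_derivative(numpy.array(0.5)) - 0.25) < 1e-12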
| 664
| 0
|
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class OptimizerPicklingTest(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
| 567
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    """Incremental sieve of Eratosthenes, yielding primes indefinitely."""
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1
def solution(limit: float = 1e10) -> int:
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(primes)
        n += 2
if __name__ == "__main__":
print(solution())
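# ---------------------------------------------------------------------------
# Hedged usage sketch (not in the original snippet): peel the first few primes
# off the generator without running the full search.
#
# from itertools import islice
# print(list(islice(sieve(), 10)))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]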
| 567
| 1
|
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
A : Tuple = logging.get_logger(__name__)
class WhisperFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features"]
    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        hop_length=160,
        chunk_length=30,
        n_fft=400,
        padding_value=0.0,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=8000.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )
    def _np_extract_fbank_features(self, waveform: np.ndarray) -> np.ndarray:
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters,
            log_mel="log10",
        )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(input_values, attention_mask, padding_value=0.0):
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []
            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
        return normed_input_values
    def __call__(
        self,
        raw_speech,
        truncation=True,
        pad_to_multiple_of=None,
        return_tensors=None,
        return_attention_mask=None,
        padding="max_length",
        max_length=None,
        sampling_rate=None,
        do_normalize=None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]
        batched_speech = BatchFeature({"input_features": raw_speech})
        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech,
            padding=padding,
            max_length=max_length if max_length else self.n_samples,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask or do_normalize,
        )
        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"],
                attention_mask=padded_inputs["attention_mask"],
                padding_value=self.padding_value,
            )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)
        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)
        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features
        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
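# ---------------------------------------------------------------------------
# Hedged usage sketch (not in the original snippet): extract log-mel features
# from one second of synthetic audio. Shapes assume the defaults above
# (80 mel bins, 30 s context -> 3000 frames).
#
# import numpy as np
# fe = WhisperFeatureExtractor()
# wave = np.sin(2 * np.pi * 440 * np.arange(16000) / 16000).astype(np.float32)
# feats = fe(wave, sampling_rate=16000, return_tensors="np")
# print(feats["input_features"].shape)  # (1, 80, 3000)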
| 163
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
A : List[str] = logging.get_logger(__name__)
class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 163
| 1
|
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10_269, 83, 99_942, 136, 60_742, 23, 70, 80_583, 18_276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.01_01, 0.12_18, -0.08_03, 0.08_01, 0.13_27, 0.07_76, -0.12_15, 0.23_83, 0.33_38, 0.31_06, 0.03_00, 0.02_52]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10_269, 83, 99_942, 136, 60_742, 23, 70, 80_583, 18_276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1_024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.06_99, -0.03_18, 0.07_05, -0.12_41, 0.09_99, -0.05_20, 0.10_04, -0.18_38, -0.47_04, 0.14_37, 0.08_21, 0.01_26]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 46
|
"""simple docstring"""
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sort `sequence[start:end + 1]` in place with the (deliberately
    inefficient) slowsort algorithm."""
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
from doctest import testmod
testmod()
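# ---------------------------------------------------------------------------
# Hedged usage sketch (not in the original snippet): slowsort works in place.
#
# data = [5, 2, 4, 1, 3]
# slowsort(data)
# print(data)  # [1, 2, 3, 4, 5]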
| 115
| 0
|
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()
class EnvironmentCommand(BaseDiffusersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)
    def run(self):
        hub_version = huggingface_hub.__version__
        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch
            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()
        transformers_version = "not installed"
        if is_transformers_available():
            import transformers
            transformers_version = transformers.__version__
        accelerate_version = "not installed"
        if is_accelerate_available():
            import accelerate
            accelerate_version = accelerate.__version__
        xformers_version = "not installed"
        if is_xformers_available():
            import xformers
            xformers_version = xformers.__version__
        info = {
            "`diffusers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "xFormers version": xformers_version,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }
        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))
        return info
    @staticmethod
    def format_dict(d: dict) -> str:
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
| 705
|
from __future__ import annotations
def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])
def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    if index == len(sequence):
        print(current_sequence)
        return
    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False
UpperCamelCase = [3, 1, 2, 4]
generate_all_permutations(sequence)
UpperCamelCase = ["A", "B", "C"]
generate_all_permutations(sequence_a)
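# ---------------------------------------------------------------------------
# Hedged note (not in the original snippet): the backtracking above prints
# each permutation as soon as it is completed, e.g.
#
# generate_all_permutations([1, 2])
# # [1, 2]
# # [2, 1]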
| 125
| 0
|
import argparse
import hashlib
import io
import os
import urllib.request
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
a : int = {
'tiny.en': 'https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt',
'tiny': 'https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt',
'base.en': 'https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt',
'base': 'https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt',
'small.en': 'https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt',
'small': 'https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt',
'medium.en': 'https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt',
'medium': 'https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt',
'large': 'https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt',
'large-v2': 'https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt',
}
def remove_ignore_keys_(state_dict):
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k, None)
a : List[str] = {
'blocks': 'layers',
'mlp.0': 'fc1',
'mlp.2': 'fc2',
'mlp_ln': 'final_layer_norm',
'.attn.query': '.self_attn.q_proj',
'.attn.key': '.self_attn.k_proj',
'.attn.value': '.self_attn.v_proj',
'.attn_ln': '.self_attn_layer_norm',
'.attn.out': '.self_attn.out_proj',
'.cross_attn.query': '.encoder_attn.q_proj',
'.cross_attn.key': '.encoder_attn.k_proj',
'.cross_attn.value': '.encoder_attn.v_proj',
'.cross_attn_ln': '.encoder_attn_layer_norm',
'.cross_attn.out': '.encoder_attn.out_proj',
'decoder.ln.': 'decoder.layer_norm.',
'encoder.ln.': 'encoder.layer_norm.',
'token_embedding': 'embed_tokens',
'encoder.positional_embedding': 'encoder.embed_positions.weight',
'decoder.positional_embedding': 'decoder.embed_positions.weight',
'ln_post': 'layer_norm',
}
def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)
        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)
    return s_dict
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def _download(url: str, root: str = ".") -> bytes:
    # `root` defaults to the current directory here as an assumption, because
    # the call site below passes only `url`.
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)
    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")
    if os.path.isfile(download_target):
        model_bytes = open(download_target, "rb").read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")
    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))
    model_bytes = open(download_target, "rb").read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model."
        )
    return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    if ".pt" not in checkpoint_path:
        # `_download` returns raw bytes, so wrap them for `torch.load`.
        original_checkpoint = torch.load(io.BytesIO(_download(_MODELS[checkpoint_path])), map_location="cpu")
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]
    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["n_mels"],
        d_model=dimensions["n_audio_state"],
        max_target_positions=dimensions["n_text_ctx"],
        encoder_layers=dimensions["n_audio_layer"],
        encoder_attention_heads=dimensions["n_audio_head"],
        decoder_layers=dimensions["n_text_layer"],
        decoder_attention_heads=dimensions["n_text_head"],
        max_source_positions=dimensions["n_audio_ctx"],
    )
    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )
    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
a : str = argparse.ArgumentParser()
# # Required parameters
    parser.add_argument('''--checkpoint_path''', type=str, help='''Path to the downloaded checkpoints''')
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
a : List[str] = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
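# ---------------------------------------------------------------------------
# Hedged CLI sketch (not in the original snippet): assuming this file is saved
# as convert_openai_whisper_to_tfms.py, a conversion run would look like
#
#   python convert_openai_whisper_to_tfms.py \
#       --checkpoint_path tiny.en \
#       --pytorch_dump_folder_path ./whisper-tiny-en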
| 69
|
from jiwer import compute_measures
import datasets
__a :List[Any] = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
__a :Union[str, Any] = '\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n'
__a :str = '\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> wer = datasets.load_metric("wer")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
"""simple docstring"""
def __A ( self : Any ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[
"https://en.wikipedia.org/wiki/Word_error_rate",
] , )
    def _compute( self , predictions=None , references=None , concatenate_texts=False ):
        if concatenate_texts:
            return compute_measures(references , predictions )["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions , references ):
                measures = compute_measures(reference , prediction )
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
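# ---------------------------------------------------------------------------
# Illustrative sketch (added; not part of the original metric file). The
# docstring above defines WER = (S + D + I) / N; `jiwer.compute_measures`
# returns those counts, and the total S + D + I is exactly the word-level
# Levenshtein distance. A dependency-free version, for intuition only:
def word_error_rate(reference: str, hypothesis: str) -> float:
    ref, hyp = reference.split(), hypothesis.split()
    # dp[i][j] = edit distance between the first i reference words and the
    # first j hypothesis words
    dp = [[0] * (len(hyp) + 1) for _ in range(len(ref) + 1)]
    for i in range(len(ref) + 1):
        dp[i][0] = i  # deleting i words
    for j in range(len(hyp) + 1):
        dp[0][j] = j  # inserting j words
    for i in range(1, len(ref) + 1):
        for j in range(1, len(hyp) + 1):
            substitution = 0 if ref[i - 1] == hyp[j - 1] else 1
            dp[i][j] = min(dp[i - 1][j] + 1,              # deletion
                           dp[i][j - 1] + 1,              # insertion
                           dp[i - 1][j - 1] + substitution)
    return dp[len(ref)][len(hyp)] / max(len(ref), 1)

# word_error_rate("this is the reference", "this is the prediction") -> 0.25
# ---------------------------------------------------------------------------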
| 86
| 0
|
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester:
def __init__( self : int , _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[int]=13 , _UpperCamelCase : Dict=3 , _UpperCamelCase : Optional[int]=True , _UpperCamelCase : Optional[int]=True , _UpperCamelCase : Union[str, Any]=0.1 , _UpperCamelCase : str=0.1 , _UpperCamelCase : Dict=224 , _UpperCamelCase : Optional[Any]=1_000 , _UpperCamelCase : Optional[Any]=[3, 3, 6, 4] , _UpperCamelCase : Union[str, Any]=[48, 56, 112, 220] , ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = layer_depths
SCREAMING_SNAKE_CASE = embed_dims
def __snake_case( self : Dict ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values, labels
def __snake_case( self : List[str] ) -> Tuple:
'''simple docstring'''
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="gelu" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=_UpperCamelCase , layer_scale_init_value=1e-5 , )
def __snake_case( self : Union[str, Any] , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : str ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = SwiftFormerModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(_UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def __snake_case( self : int , _UpperCamelCase : int , _UpperCamelCase : str , _UpperCamelCase : str ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = SwiftFormerForImageClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
SCREAMING_SNAKE_CASE = SwiftFormerForImageClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE = model(_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __snake_case( self : Any ) -> Tuple:
'''simple docstring'''
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
lowercase__ : List[str] = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
lowercase__ : str = (
{"""feature-extraction""": SwiftFormerModel, """image-classification""": SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
lowercase__ : Optional[Any] = False
lowercase__ : Optional[Any] = False
lowercase__ : Any = False
lowercase__ : Union[str, Any] = False
lowercase__ : str = False
def __snake_case( self : Optional[int] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = SwiftFormerModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(
self , config_class=_UpperCamelCase , has_text_modality=_UpperCamelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def __snake_case( self : str ) -> int:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="SwiftFormer does not use inputs_embeds" )
def __snake_case( self : List[str] ) -> Dict:
'''simple docstring'''
pass
def __snake_case( self : Dict ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(_UpperCamelCase )
SCREAMING_SNAKE_CASE = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCamelCase , nn.Linear ) )
def __snake_case( self : Tuple ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(_UpperCamelCase )
SCREAMING_SNAKE_CASE = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _UpperCamelCase )
def __snake_case( self : Tuple ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def __snake_case( self : Optional[int] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCamelCase )
@slow
def __snake_case( self : List[str] ) -> Optional[int]:
'''simple docstring'''
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = SwiftFormerModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
@unittest.skip(reason="SwiftFormer does not output attentions" )
def __snake_case( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
pass
def __snake_case( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
def check_hidden_states_output(_UpperCamelCase : List[str] , _UpperCamelCase : Any , _UpperCamelCase : Dict ):
SCREAMING_SNAKE_CASE = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
SCREAMING_SNAKE_CASE = outputs.hidden_states
SCREAMING_SNAKE_CASE = 8
self.assertEqual(len(_UpperCamelCase ) , _UpperCamelCase ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(_UpperCamelCase ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = True
check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE = True
check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def __snake_case( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
def _config_zero_init(_UpperCamelCase : Union[str, Any] ):
SCREAMING_SNAKE_CASE = copy.deepcopy(_UpperCamelCase )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(_UpperCamelCase , _UpperCamelCase , 1e-10 )
if isinstance(getattr(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) , _UpperCamelCase ):
SCREAMING_SNAKE_CASE = _config_zero_init(getattr(_UpperCamelCase , _UpperCamelCase ) )
setattr(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
return configs_no_init
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = _config_zero_init(_UpperCamelCase )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(config=_UpperCamelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def __snake_case( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
pass
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
@cached_property
    def default_image_processor( self ) -> "ViTImageProcessor":
'''simple docstring'''
return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs" ) if is_vision_available() else None
@slow
def __snake_case( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs" ).to(_UpperCamelCase )
SCREAMING_SNAKE_CASE = self.default_image_processor
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(images=_UpperCamelCase , return_tensors="pt" ).to(_UpperCamelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**_UpperCamelCase )
# verify the logits
SCREAMING_SNAKE_CASE = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , _UpperCamelCase )
        SCREAMING_SNAKE_CASE = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]] ).to(_UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCamelCase , atol=1e-4 ) )
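# ---------------------------------------------------------------------------
# Illustrative sketch (added; not from the original test file). It replays the
# integration test above as a standalone inference recipe; the checkpoint name
# and fixture path are taken from the test itself.
import torch
from PIL import Image
from transformers import SwiftFormerForImageClassification, ViTImageProcessor

processor = ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs")
model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs").eval()

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits          # shape (1, 1000), as asserted above
print(model.config.id2label[int(logits.argmax(-1))])
# ---------------------------------------------------------------------------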
| 712
|
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO ) , """Tatoeba directory does not exist.""" )
class lowercase ( unittest.TestCase ):
@cached_property
    def resolver( self ) -> TatoebaConverter:
        '''simple docstring'''
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir )
@slow
def __snake_case( self : Tuple ) -> List[Any]:
'''simple docstring'''
self.resolver.convert_models(["heb-eng"] )
@slow
def __snake_case( self : Any ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.resolver.write_model_card("opus-mt-he-en" , dry_run=_UpperCamelCase )
assert mmeta["long_pair"] == "heb-eng"
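# ---------------------------------------------------------------------------
# Illustrative sketch (added; not from the original test file). Outside the
# test harness, the converter above is driven the same way; the pair names and
# return values follow the test, everything else is illustrative.
import tempfile
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import TatoebaConverter

converter = TatoebaConverter(save_dir=tempfile.mkdtemp())
converter.convert_models(["heb-eng"])        # downloads and converts the pair
card_text, metadata = converter.write_model_card("opus-mt-he-en", dry_run=True)
assert metadata["long_pair"] == "heb-eng"
# ---------------------------------------------------------------------------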
| 647
| 0
|
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class __lowercase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = DownBlockaD # noqa F405
lowercase = 'down'
def __a ( self : Any ) -> int:
'''simple docstring'''
lowercase = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
super().test_output(A_ )
class __lowercase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = ResnetDownsampleBlockaD # noqa F405
lowercase = 'down'
def __a ( self : List[str] ) -> str:
'''simple docstring'''
lowercase = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
super().test_output(A_ )
class __lowercase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = AttnDownBlockaD # noqa F405
lowercase = 'down'
def __a ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
lowercase = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
super().test_output(A_ )
class __lowercase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = CrossAttnDownBlockaD # noqa F405
lowercase = 'down'
def __a ( self : int ) -> str:
'''simple docstring'''
lowercase = super().prepare_init_args_and_inputs_for_common()
lowercase = 32
return init_dict, inputs_dict
def __a ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
lowercase = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
super().test_output(A_ )
class __lowercase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = SimpleCrossAttnDownBlockaD # noqa F405
lowercase = 'down'
@property
def __a ( self : int ) -> Union[str, Any]:
'''simple docstring'''
return super().get_dummy_input(include_encoder_hidden_states=A_ )
def __a ( self : List[Any] ) -> List[str]:
'''simple docstring'''
lowercase = super().prepare_init_args_and_inputs_for_common()
lowercase = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == '''mps''' , '''MPS result is not consistent''' )
def __a ( self : List[Any] ) -> int:
'''simple docstring'''
lowercase = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
super().test_output(A_ )
class __lowercase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = SkipDownBlockaD # noqa F405
lowercase = 'down'
@property
def __a ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
return super().get_dummy_input(include_skip_sample=A_ )
def __a ( self : Optional[Any] ) -> int:
'''simple docstring'''
lowercase = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
super().test_output(A_ )
class __lowercase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = AttnSkipDownBlockaD # noqa F405
lowercase = 'down'
@property
def __a ( self : int ) -> Tuple:
'''simple docstring'''
return super().get_dummy_input(include_skip_sample=A_ )
def __a ( self : str ) -> Optional[int]:
'''simple docstring'''
lowercase = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
super().test_output(A_ )
class __lowercase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = DownEncoderBlockaD # noqa F405
lowercase = 'down'
@property
def __a ( self : Dict ) -> Optional[int]:
'''simple docstring'''
return super().get_dummy_input(include_temb=A_ )
def __a ( self : Any ) -> List[Any]:
'''simple docstring'''
lowercase = {
"""in_channels""": 32,
"""out_channels""": 32,
}
lowercase = self.dummy_input
return init_dict, inputs_dict
def __a ( self : List[str] ) -> int:
'''simple docstring'''
lowercase = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
super().test_output(A_ )
class __lowercase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = AttnDownEncoderBlockaD # noqa F405
lowercase = 'down'
@property
def __a ( self : str ) -> Optional[Any]:
'''simple docstring'''
return super().get_dummy_input(include_temb=A_ )
def __a ( self : Optional[Any] ) -> int:
'''simple docstring'''
lowercase = {
"""in_channels""": 32,
"""out_channels""": 32,
}
lowercase = self.dummy_input
return init_dict, inputs_dict
def __a ( self : Dict ) -> List[Any]:
'''simple docstring'''
lowercase = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
super().test_output(A_ )
class __lowercase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = UNetMidBlockaD # noqa F405
lowercase = 'mid'
def __a ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
lowercase = {
"""in_channels""": 32,
"""temb_channels""": 1_28,
}
lowercase = self.dummy_input
return init_dict, inputs_dict
def __a ( self : int ) -> Any:
'''simple docstring'''
lowercase = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
super().test_output(A_ )
class __lowercase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = UNetMidBlockaDCrossAttn # noqa F405
lowercase = 'mid'
def __a ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
lowercase = super().prepare_init_args_and_inputs_for_common()
lowercase = 32
return init_dict, inputs_dict
def __a ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
lowercase = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
super().test_output(A_ )
class __lowercase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = UNetMidBlockaDSimpleCrossAttn # noqa F405
lowercase = 'mid'
@property
def __a ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
return super().get_dummy_input(include_encoder_hidden_states=A_ )
def __a ( self : List[Any] ) -> int:
'''simple docstring'''
lowercase = super().prepare_init_args_and_inputs_for_common()
lowercase = 32
return init_dict, inputs_dict
def __a ( self : str ) -> Tuple:
'''simple docstring'''
lowercase = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
super().test_output(A_ )
class __lowercase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = UpBlockaD # noqa F405
lowercase = 'up'
@property
def __a ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=A_ )
def __a ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
lowercase = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
super().test_output(A_ )
class __lowercase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = ResnetUpsampleBlockaD # noqa F405
lowercase = 'up'
@property
def __a ( self : int ) -> List[Any]:
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=A_ )
def __a ( self : List[Any] ) -> Dict:
'''simple docstring'''
lowercase = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
super().test_output(A_ )
class __lowercase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = CrossAttnUpBlockaD # noqa F405
lowercase = 'up'
@property
def __a ( self : Tuple ) -> str:
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=A_ )
def __a ( self : Tuple ) -> str:
'''simple docstring'''
lowercase = super().prepare_init_args_and_inputs_for_common()
lowercase = 32
return init_dict, inputs_dict
def __a ( self : int ) -> str:
'''simple docstring'''
lowercase = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
super().test_output(A_ )
class __lowercase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = SimpleCrossAttnUpBlockaD # noqa F405
lowercase = 'up'
@property
def __a ( self : int ) -> Tuple:
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=A_ , include_encoder_hidden_states=A_ )
def __a ( self : Optional[Any] ) -> Any:
'''simple docstring'''
lowercase = super().prepare_init_args_and_inputs_for_common()
lowercase = 32
return init_dict, inputs_dict
def __a ( self : Any ) -> int:
'''simple docstring'''
lowercase = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
super().test_output(A_ )
class __lowercase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = AttnUpBlockaD # noqa F405
lowercase = 'up'
@property
def __a ( self : int ) -> str:
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=A_ )
@unittest.skipIf(torch_device == '''mps''' , '''MPS result is not consistent''' )
def __a ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
lowercase = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
super().test_output(A_ )
class __lowercase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = SkipUpBlockaD # noqa F405
lowercase = 'up'
@property
def __a ( self : int ) -> Optional[int]:
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=A_ )
def __a ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
lowercase = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
super().test_output(A_ )
class __lowercase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = AttnSkipUpBlockaD # noqa F405
lowercase = 'up'
@property
def __a ( self : Dict ) -> Optional[int]:
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=A_ )
def __a ( self : str ) -> List[Any]:
'''simple docstring'''
lowercase = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
super().test_output(A_ )
class __lowercase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = UpDecoderBlockaD # noqa F405
lowercase = 'up'
@property
def __a ( self : Tuple ) -> Any:
'''simple docstring'''
return super().get_dummy_input(include_temb=A_ )
def __a ( self : List[Any] ) -> Dict:
'''simple docstring'''
lowercase = {"""in_channels""": 32, """out_channels""": 32}
lowercase = self.dummy_input
return init_dict, inputs_dict
def __a ( self : List[Any] ) -> List[str]:
'''simple docstring'''
lowercase = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
super().test_output(A_ )
class __lowercase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = AttnUpDecoderBlockaD # noqa F405
lowercase = 'up'
@property
def __a ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
return super().get_dummy_input(include_temb=A_ )
def __a ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
lowercase = {"""in_channels""": 32, """out_channels""": 32}
lowercase = self.dummy_input
return init_dict, inputs_dict
def __a ( self : str ) -> Any:
'''simple docstring'''
lowercase = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
super().test_output(A_ )
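# ---------------------------------------------------------------------------
# Illustrative sketch (added; not from the original test file). Every test
# above hands a 9-value expected slice to `super().test_output`; the mixin's
# exact implementation may differ, but the check amounts to comparing a small
# flattened slice of the block output against those values with a tolerance:
import torch

def check_output_slice(output: torch.Tensor, expected_slice: list, atol: float = 5e-3) -> None:
    actual = output.flatten()[-len(expected_slice):]        # trailing slice
    expected = torch.tensor(expected_slice)
    assert torch.allclose(actual, expected, atol=atol), (
        f"max diff: {(actual - expected).abs().max():.4f}"
    )
# ---------------------------------------------------------------------------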
| 604
|
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key( name ):
# vision encoder
if "img_encoder.pos_embed" in name:
lowerCamelCase_: Optional[Any] = name.replace("""img_encoder.pos_embed""" , """vision_model.embeddings.position_embeddings""" )
if "img_encoder.patch_embed.proj" in name:
lowerCamelCase_: Union[str, Any] = name.replace("""img_encoder.patch_embed.proj""" , """vision_model.embeddings.patch_embeddings.projection""" )
if "img_encoder.patch_embed.norm" in name:
lowerCamelCase_: Dict = name.replace("""img_encoder.patch_embed.norm""" , """vision_model.embeddings.layernorm""" )
if "img_encoder.layers" in name:
lowerCamelCase_: Optional[int] = name.replace("""img_encoder.layers""" , """vision_model.encoder.stages""" )
if "blocks" in name and "res" not in name:
lowerCamelCase_: int = name.replace("""blocks""" , """layers""" )
if "attn" in name and "pre_assign" not in name:
lowerCamelCase_: List[Any] = name.replace("""attn""" , """self_attn""" )
if "proj" in name and "self_attn" in name and "text" not in name:
lowerCamelCase_: List[str] = name.replace("""proj""" , """out_proj""" )
if "pre_assign_attn.attn.proj" in name:
lowerCamelCase_: Dict = name.replace("""pre_assign_attn.attn.proj""" , """pre_assign_attn.attn.out_proj""" )
if "norm1" in name:
lowerCamelCase_: Optional[Any] = name.replace("""norm1""" , """layer_norm1""" )
if "norm2" in name and "pre_assign" not in name:
lowerCamelCase_: Optional[int] = name.replace("""norm2""" , """layer_norm2""" )
if "img_encoder.norm" in name:
lowerCamelCase_: Tuple = name.replace("""img_encoder.norm""" , """vision_model.layernorm""" )
# text encoder
if "text_encoder.token_embedding" in name:
lowerCamelCase_: Tuple = name.replace("""text_encoder.token_embedding""" , """text_model.embeddings.token_embedding""" )
if "text_encoder.positional_embedding" in name:
lowerCamelCase_: Dict = name.replace("""text_encoder.positional_embedding""" , """text_model.embeddings.position_embedding.weight""" )
if "text_encoder.transformer.resblocks." in name:
lowerCamelCase_: Optional[int] = name.replace("""text_encoder.transformer.resblocks.""" , """text_model.encoder.layers.""" )
if "ln_1" in name:
lowerCamelCase_: Any = name.replace("""ln_1""" , """layer_norm1""" )
if "ln_2" in name:
lowerCamelCase_: Optional[Any] = name.replace("""ln_2""" , """layer_norm2""" )
if "c_fc" in name:
lowerCamelCase_: Optional[int] = name.replace("""c_fc""" , """fc1""" )
if "c_proj" in name:
lowerCamelCase_: List[Any] = name.replace("""c_proj""" , """fc2""" )
if "text_encoder" in name:
lowerCamelCase_: Optional[Any] = name.replace("""text_encoder""" , """text_model""" )
if "ln_final" in name:
lowerCamelCase_: Optional[int] = name.replace("""ln_final""" , """final_layer_norm""" )
# projection layers
if "img_projector.linear_hidden." in name:
lowerCamelCase_: List[Any] = name.replace("""img_projector.linear_hidden.""" , """visual_projection.""" )
if "img_projector.linear_out." in name:
lowerCamelCase_: Any = name.replace("""img_projector.linear_out.""" , """visual_projection.3.""" )
if "text_projector.linear_hidden" in name:
lowerCamelCase_: List[Any] = name.replace("""text_projector.linear_hidden""" , """text_projection""" )
if "text_projector.linear_out" in name:
lowerCamelCase_: int = name.replace("""text_projector.linear_out""" , """text_projection.3""" )
return name
def convert_state_dict( orig_state_dict , config ):
for key in orig_state_dict.copy().keys():
lowerCamelCase_: str = orig_state_dict.pop(_UpperCAmelCase )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
lowerCamelCase_: List[str] = key.split(""".""" )
lowerCamelCase_ , lowerCamelCase_: str = int(key_split[2] ), int(key_split[4] )
lowerCamelCase_: Optional[int] = config.vision_config.hidden_size
if "weight" in key:
lowerCamelCase_: int = val[:dim, :]
lowerCamelCase_: int = val[dim : dim * 2, :]
lowerCamelCase_: Union[str, Any] = val[-dim:, :]
else:
lowerCamelCase_: List[Any] = val[:dim]
lowerCamelCase_: List[Any] = val[dim : dim * 2]
lowerCamelCase_: Tuple = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
lowerCamelCase_: int = key.split(""".""" )
lowerCamelCase_: Tuple = int(key_split[3] )
lowerCamelCase_: List[str] = config.text_config.hidden_size
if "weight" in key:
lowerCamelCase_: Any = val[:dim, :]
lowerCamelCase_: str = val[
dim : dim * 2, :
]
lowerCamelCase_: List[str] = val[-dim:, :]
else:
lowerCamelCase_: Optional[Any] = val[:dim]
lowerCamelCase_: Optional[Any] = val[dim : dim * 2]
lowerCamelCase_: Optional[int] = val[-dim:]
else:
lowerCamelCase_: Dict = rename_key(_UpperCAmelCase )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
lowerCamelCase_: Tuple = val.squeeze_()
else:
lowerCamelCase_: Dict = val
return orig_state_dict
def prepare_img():
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_groupvit_checkpoint( checkpoint_path , pytorch_dump_folder_path , model_name="groupvit-gcc-yfcc" , push_to_hub=False ):
lowerCamelCase_: Optional[int] = GroupViTConfig()
lowerCamelCase_: Optional[Any] = GroupViTModel(_UpperCAmelCase ).eval()
lowerCamelCase_: List[str] = torch.load(_UpperCAmelCase , map_location="""cpu""" )["""model"""]
lowerCamelCase_: Union[str, Any] = convert_state_dict(_UpperCAmelCase , _UpperCAmelCase )
lowerCamelCase_ , lowerCamelCase_: str = model.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(_UpperCAmelCase ) == 0)
# verify result
lowerCamelCase_: Optional[int] = CLIPProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
lowerCamelCase_: str = prepare_img()
lowerCamelCase_: Dict = processor(text=["""a photo of a cat""", """a photo of a dog"""] , images=_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors="""pt""" )
with torch.no_grad():
lowerCamelCase_: Dict = model(**_UpperCAmelCase )
if model_name == "groupvit-gcc-yfcc":
        lowerCamelCase_: Optional[Any] = torch.tensor([[13.3523, 6.3629]] )
elif model_name == "groupvit-gcc-redcaps":
        lowerCamelCase_: Union[str, Any] = torch.tensor([[16.1873, 8.6230]] )
else:
raise ValueError(f"""Model name {model_name} not supported.""" )
assert torch.allclose(outputs.logits_per_image , _UpperCAmelCase , atol=1E-3 )
processor.save_pretrained(_UpperCAmelCase )
model.save_pretrained(_UpperCAmelCase )
print("""Successfully saved processor and model to""" , _UpperCAmelCase )
if push_to_hub:
print("""Pushing to the hub...""" )
processor.push_to_hub(_UpperCAmelCase , organization="""nielsr""" )
model.push_to_hub(_UpperCAmelCase , organization="""nielsr""" )
if __name__ == "__main__":
lowercase : List[str] = argparse.ArgumentParser()
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to dump the processor and PyTorch model."""
)
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to GroupViT checkpoint""")
parser.add_argument(
"""--model_name""",
default="""groupvit-gccy-fcc""",
type=str,
help="""Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.""",
)
lowercase : List[str] = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
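# ---------------------------------------------------------------------------
# Illustrative sketch (added; not part of the original conversion script).
# The qkv branch in convert_state_dict splits one fused projection of shape
# (3 * dim, dim) into query/key/value by slicing the first axis, exactly as
# the `val[:dim]`, `val[dim : dim * 2]`, `val[-dim:]` lines above do:
import torch

dim = 4                                    # toy hidden size
qkv_weight = torch.randn(3 * dim, dim)     # fused checkpoint tensor
q = qkv_weight[:dim, :]                    # rows [0, dim)          -> query
k = qkv_weight[dim : dim * 2, :]           # rows [dim, 2 * dim)    -> key
v = qkv_weight[-dim:, :]                   # rows [2 * dim, 3 * dim) -> value
assert torch.equal(torch.cat([q, k, v], dim=0), qkv_weight)
# ---------------------------------------------------------------------------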
| 423
| 0
|
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
__A = logging.get_logger(__name__)
__A = OrderedDict(
[
('''audio-spectrogram-transformer''', '''ASTFeatureExtractor'''),
('''beit''', '''BeitFeatureExtractor'''),
('''chinese_clip''', '''ChineseCLIPFeatureExtractor'''),
('''clap''', '''ClapFeatureExtractor'''),
('''clip''', '''CLIPFeatureExtractor'''),
('''clipseg''', '''ViTFeatureExtractor'''),
('''conditional_detr''', '''ConditionalDetrFeatureExtractor'''),
('''convnext''', '''ConvNextFeatureExtractor'''),
('''cvt''', '''ConvNextFeatureExtractor'''),
('''data2vec-audio''', '''Wav2Vec2FeatureExtractor'''),
('''data2vec-vision''', '''BeitFeatureExtractor'''),
('''deformable_detr''', '''DeformableDetrFeatureExtractor'''),
('''deit''', '''DeiTFeatureExtractor'''),
('''detr''', '''DetrFeatureExtractor'''),
('''dinat''', '''ViTFeatureExtractor'''),
('''donut-swin''', '''DonutFeatureExtractor'''),
('''dpt''', '''DPTFeatureExtractor'''),
('''encodec''', '''EncodecFeatureExtractor'''),
('''flava''', '''FlavaFeatureExtractor'''),
('''glpn''', '''GLPNFeatureExtractor'''),
('''groupvit''', '''CLIPFeatureExtractor'''),
('''hubert''', '''Wav2Vec2FeatureExtractor'''),
('''imagegpt''', '''ImageGPTFeatureExtractor'''),
('''layoutlmv2''', '''LayoutLMv2FeatureExtractor'''),
('''layoutlmv3''', '''LayoutLMv3FeatureExtractor'''),
('''levit''', '''LevitFeatureExtractor'''),
('''maskformer''', '''MaskFormerFeatureExtractor'''),
('''mctct''', '''MCTCTFeatureExtractor'''),
('''mobilenet_v1''', '''MobileNetV1FeatureExtractor'''),
('''mobilenet_v2''', '''MobileNetV2FeatureExtractor'''),
('''mobilevit''', '''MobileViTFeatureExtractor'''),
('''nat''', '''ViTFeatureExtractor'''),
('''owlvit''', '''OwlViTFeatureExtractor'''),
('''perceiver''', '''PerceiverFeatureExtractor'''),
('''poolformer''', '''PoolFormerFeatureExtractor'''),
('''regnet''', '''ConvNextFeatureExtractor'''),
('''resnet''', '''ConvNextFeatureExtractor'''),
('''segformer''', '''SegformerFeatureExtractor'''),
('''sew''', '''Wav2Vec2FeatureExtractor'''),
('''sew-d''', '''Wav2Vec2FeatureExtractor'''),
('''speech_to_text''', '''Speech2TextFeatureExtractor'''),
('''speecht5''', '''SpeechT5FeatureExtractor'''),
('''swiftformer''', '''ViTFeatureExtractor'''),
('''swin''', '''ViTFeatureExtractor'''),
('''swinv2''', '''ViTFeatureExtractor'''),
('''table-transformer''', '''DetrFeatureExtractor'''),
('''timesformer''', '''VideoMAEFeatureExtractor'''),
('''tvlt''', '''TvltFeatureExtractor'''),
('''unispeech''', '''Wav2Vec2FeatureExtractor'''),
('''unispeech-sat''', '''Wav2Vec2FeatureExtractor'''),
('''van''', '''ConvNextFeatureExtractor'''),
('''videomae''', '''VideoMAEFeatureExtractor'''),
('''vilt''', '''ViltFeatureExtractor'''),
('''vit''', '''ViTFeatureExtractor'''),
('''vit_mae''', '''ViTFeatureExtractor'''),
('''vit_msn''', '''ViTFeatureExtractor'''),
('''wav2vec2''', '''Wav2Vec2FeatureExtractor'''),
('''wav2vec2-conformer''', '''Wav2Vec2FeatureExtractor'''),
('''wavlm''', '''Wav2Vec2FeatureExtractor'''),
('''whisper''', '''WhisperFeatureExtractor'''),
('''xclip''', '''CLIPFeatureExtractor'''),
('''yolos''', '''YolosFeatureExtractor'''),
]
)
__A = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def _SCREAMING_SNAKE_CASE ( A : str ) -> Union[str, Any]:
"""simple docstring"""
for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
if class_name in extractors:
__snake_case : Tuple = model_type_to_module_name(A )
__snake_case : Tuple = importlib.import_module(F""".{module_name}""" , 'transformers.models' )
try:
return getattr(A , A )
except AttributeError:
continue
for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
if getattr(A , '__name__' , A ) == class_name:
return extractor
# We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
__snake_case : List[str] = importlib.import_module('transformers' )
if hasattr(A , A ):
return getattr(A , A )
return None
def _SCREAMING_SNAKE_CASE ( A : Union[str, os.PathLike] , A : Optional[Union[str, os.PathLike]] = None , A : bool = False , A : bool = False , A : Optional[Dict[str, str]] = None , A : Optional[Union[bool, str]] = None , A : Optional[str] = None , A : bool = False , **A : Optional[Any] , ) -> Optional[int]:
"""simple docstring"""
__snake_case : List[Any] = get_file_from_repo(
A , A , cache_dir=A , force_download=A , resume_download=A , proxies=A , use_auth_token=A , revision=A , local_files_only=A , )
if resolved_config_file is None:
logger.info(
'Could not locate the feature extractor configuration file, will try to use the model config instead.' )
return {}
with open(A , encoding='utf-8' ) as reader:
return json.load(A )
class a_ :
def __init__(self) -> Dict:
"""simple docstring"""
raise EnvironmentError(
'AutoFeatureExtractor is designed to be instantiated '
'using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.')
@classmethod
@replace_list_option_in_docstrings(__a)
def SCREAMING_SNAKE_CASE__ (cls , __a , **__a) -> Union[str, Any]:
"""simple docstring"""
__snake_case : List[str] = kwargs.pop('config' , __a)
__snake_case : Tuple = kwargs.pop('trust_remote_code' , __a)
__snake_case : List[Any] = True
__snake_case ,__snake_case : List[Any] = FeatureExtractionMixin.get_feature_extractor_dict(__a , **__a)
__snake_case : Optional[int] = config_dict.get('feature_extractor_type' , __a)
__snake_case : str = None
if "AutoFeatureExtractor" in config_dict.get('auto_map' , {}):
__snake_case : Union[str, Any] = config_dict['auto_map']['AutoFeatureExtractor']
# If we don't find the feature extractor class in the feature extractor config, let's try the model config.
if feature_extractor_class is None and feature_extractor_auto_map is None:
if not isinstance(__a , __a):
__snake_case : Optional[int] = AutoConfig.from_pretrained(__a , **__a)
# It could be in `config.feature_extractor_type``
__snake_case : int = getattr(__a , 'feature_extractor_type' , __a)
if hasattr(__a , 'auto_map') and "AutoFeatureExtractor" in config.auto_map:
__snake_case : Tuple = config.auto_map['AutoFeatureExtractor']
if feature_extractor_class is not None:
__snake_case : List[Any] = feature_extractor_class_from_name(__a)
__snake_case : Optional[Any] = feature_extractor_auto_map is not None
__snake_case : Union[str, Any] = feature_extractor_class is not None or type(__a) in FEATURE_EXTRACTOR_MAPPING
__snake_case : str = resolve_trust_remote_code(
__a , __a , __a , __a)
if has_remote_code and trust_remote_code:
__snake_case : List[str] = get_class_from_dynamic_module(
__a , __a , **__a)
__snake_case : Any = kwargs.pop('code_revision' , __a)
if os.path.isdir(__a):
feature_extractor_class.register_for_auto_class()
return feature_extractor_class.from_dict(__a , **__a)
elif feature_extractor_class is not None:
return feature_extractor_class.from_dict(__a , **__a)
# Last try: we use the FEATURE_EXTRACTOR_MAPPING.
elif type(__a) in FEATURE_EXTRACTOR_MAPPING:
__snake_case : Optional[Any] = FEATURE_EXTRACTOR_MAPPING[type(__a)]
return feature_extractor_class.from_dict(__a , **__a)
raise ValueError(
F"""Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a """
F"""`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following """
F"""`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}""")
@staticmethod
def SCREAMING_SNAKE_CASE__ (__a , __a) -> Dict:
"""simple docstring"""
FEATURE_EXTRACTOR_MAPPING.register(__a , __a)
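# ---------------------------------------------------------------------------
# Illustrative sketch (added; not part of the original module). Typical use of
# the class above: `from_pretrained` resolves the extractor class from the
# checkpoint's config via the mapping at the top of the file (e.g. "whisper"
# -> WhisperFeatureExtractor). The checkpoint name is an example, not taken
# from this file.
from transformers import AutoFeatureExtractor

feature_extractor = AutoFeatureExtractor.from_pretrained("openai/whisper-tiny")
print(type(feature_extractor).__name__)   # WhisperFeatureExtractor
# ---------------------------------------------------------------------------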
| 61
|
'''simple docstring'''
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class a_ ( unittest.TestCase , ToolTesterMixin ):
def SCREAMING_SNAKE_CASE__ (self) -> List[Any]:
"""simple docstring"""
__snake_case : List[str] = load_tool('text-to-speech')
self.tool.setup()
def SCREAMING_SNAKE_CASE__ (self) -> Optional[Any]:
"""simple docstring"""
torch.manual_seed(0)
__snake_case : Dict = self.tool('hey')
__snake_case : List[Any] = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3] , torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]) , ))
    def SCREAMING_SNAKE_CASE__ (self) -> List[Any]:
        """simple docstring"""
        torch.manual_seed(0)
        __snake_case : Any = self.tool(text='hey')
        __snake_case : Any = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3] , torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]) , ))
| 61
| 1
|
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
SCREAMING_SNAKE_CASE = args.pruning_method
SCREAMING_SNAKE_CASE = args.threshold
SCREAMING_SNAKE_CASE = args.model_name_or_path.rstrip('/')
SCREAMING_SNAKE_CASE = args.target_model_path
print(F'''Load fine-pruned model from {model_name_or_path}''')
SCREAMING_SNAKE_CASE = torch.load(os.path.join(snake_case__ , 'pytorch_model.bin'))
SCREAMING_SNAKE_CASE = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
SCREAMING_SNAKE_CASE = tensor
print(F'''Copied layer {name}''')
elif "classifier" in name or "qa_output" in name:
SCREAMING_SNAKE_CASE = tensor
print(F'''Copied layer {name}''')
elif "bias" in name:
SCREAMING_SNAKE_CASE = tensor
print(F'''Copied layer {name}''')
else:
if pruning_method == "magnitude":
SCREAMING_SNAKE_CASE = MagnitudeBinarizer.apply(inputs=snake_case__ , threshold=snake_case__)
SCREAMING_SNAKE_CASE = tensor * mask
print(F'''Pruned layer {name}''')
elif pruning_method == "topK":
if "mask_scores" in name:
continue
SCREAMING_SNAKE_CASE = name[:-6]
SCREAMING_SNAKE_CASE = model[F'''{prefix_}mask_scores''']
SCREAMING_SNAKE_CASE = TopKBinarizer.apply(snake_case__ , snake_case__)
SCREAMING_SNAKE_CASE = tensor * mask
print(F'''Pruned layer {name}''')
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
SCREAMING_SNAKE_CASE = name[:-6]
SCREAMING_SNAKE_CASE = model[F'''{prefix_}mask_scores''']
SCREAMING_SNAKE_CASE = ThresholdBinarizer.apply(snake_case__ , snake_case__ , snake_case__)
SCREAMING_SNAKE_CASE = tensor * mask
print(F'''Pruned layer {name}''')
elif pruning_method == "l0":
if "mask_scores" in name:
continue
SCREAMING_SNAKE_CASE = name[:-6]
SCREAMING_SNAKE_CASE = model[F'''{prefix_}mask_scores''']
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = -0.1, 1.1
SCREAMING_SNAKE_CASE = torch.sigmoid(snake_case__)
SCREAMING_SNAKE_CASE = s * (r - l) + l
SCREAMING_SNAKE_CASE = s_bar.clamp(min=0.0 , max=1.0)
SCREAMING_SNAKE_CASE = tensor * mask
print(F'''Pruned layer {name}''')
else:
raise ValueError('Unknown pruning method')
if target_model_path is None:
SCREAMING_SNAKE_CASE = os.path.join(
os.path.dirname(snake_case__) , F'''bertarized_{os.path.basename(snake_case__)}''')
if not os.path.isdir(snake_case__):
shutil.copytree(snake_case__ , snake_case__)
print(F'''\nCreated folder {target_model_path}''')
torch.save(snake_case__ , os.path.join(snake_case__ , 'pytorch_model.bin'))
print('\nPruned model saved! See you later!')
if __name__ == "__main__":
a_ : Dict = argparse.ArgumentParser()
parser.add_argument(
'--pruning_method',
choices=['l0', 'magnitude', 'topK', 'sigmoied_threshold'],
type=str,
required=True,
help=(
'Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'
' sigmoied_threshold = Soft movement pruning)'
),
)
parser.add_argument(
'--threshold',
type=float,
required=False,
help=(
'For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'
'For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'
'Not needed for `l0`'
),
)
parser.add_argument(
'--model_name_or_path',
type=str,
required=True,
help='Folder containing the model that was previously fine-pruned',
)
parser.add_argument(
'--target_model_path',
default=None,
type=str,
required=False,
help='Folder containing the model that was previously fine-pruned',
)
a_ : List[Any] = parser.parse_args()
main(args)
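# ---------------------------------------------------------------------------
# Illustrative sketch (added; not part of the original script). The "l0"
# branch above maps raw mask scores to a mask through the stretched sigmoid
# used in L0 regularization: s_bar = sigmoid(s) * (r - l) + l with
# (l, r) = (-0.1, 1.1), clamped to [0, 1] so scores can saturate at 0 or 1.
import torch

scores = torch.tensor([-5.0, -0.5, 0.0, 0.5, 5.0])
l, r = -0.1, 1.1
mask = (torch.sigmoid(scores) * (r - l) + l).clamp(min=0.0, max=1.0)
print(mask)   # ~0 for strongly negative scores, ~1 for strongly positive ones
# ---------------------------------------------------------------------------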
| 73
|
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json""",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class A_ ( UpperCAmelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = '''blenderbot-small'''
SCREAMING_SNAKE_CASE_ : int = ['''past_key_values''']
SCREAMING_SNAKE_CASE_ : Tuple = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
    def __init__( self : List[str] ,__A : List[Any]=50265 ,__A : str=512 ,__A : Optional[int]=8 ,__A : Any=2048 ,__A : Tuple=16 ,__A : str=8 ,__A : int=2048 ,__A : List[str]=16 ,__A : Optional[int]=0.0 ,__A : Any=0.0 ,__A : int=True ,__A : List[Any]=True ,__A : Tuple="gelu" ,__A : Any=512 ,__A : Dict=0.1 ,__A : Tuple=0.0 ,__A : int=0.0 ,__A : int=0.02 ,__A : Dict=1 ,__A : str=False ,__A : Dict=0 ,__A : Union[str, Any]=1 ,__A : Optional[int]=2 ,__A : List[str]=2 ,**__A : Tuple ,) -> Tuple:
_lowercase = vocab_size
_lowercase = max_position_embeddings
_lowercase = d_model
_lowercase = encoder_ffn_dim
_lowercase = encoder_layers
_lowercase = encoder_attention_heads
_lowercase = decoder_ffn_dim
_lowercase = decoder_layers
_lowercase = decoder_attention_heads
_lowercase = dropout
_lowercase = attention_dropout
_lowercase = activation_dropout
_lowercase = activation_function
_lowercase = init_std
_lowercase = encoder_layerdrop
_lowercase = decoder_layerdrop
_lowercase = use_cache
_lowercase = encoder_layers
_lowercase = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=__A ,bos_token_id=__A ,eos_token_id=__A ,is_encoder_decoder=__A ,decoder_start_token_id=__A ,forced_eos_token_id=__A ,**__A ,)
class A_ ( UpperCAmelCase ):
"""simple docstring"""
@property
def __UpperCAmelCase ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
_lowercase = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
_lowercase = {0: 'batch'}
_lowercase = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
_lowercase = {0: 'batch', 1: 'decoder_sequence'}
_lowercase = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(__A ,direction='inputs' )
elif self.task == "causal-lm":
# TODO: figure this case out.
_lowercase = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
_lowercase , _lowercase = self.num_layers
for i in range(__A ):
_lowercase = {0: 'batch', 2: 'past_sequence + sequence'}
_lowercase = {0: 'batch', 2: 'past_sequence + sequence'}
else:
_lowercase = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
] )
return common_inputs
@property
def __UpperCAmelCase ( self : Dict ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
_lowercase = super().outputs
else:
_lowercase = super(__A ,self ).outputs
if self.use_past:
_lowercase , _lowercase = self.num_layers
for i in range(__A ):
_lowercase = {0: 'batch', 2: 'past_sequence + sequence'}
_lowercase = {0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def __UpperCAmelCase ( self : Optional[int] ,__A : PreTrainedTokenizer ,__A : int = -1 ,__A : int = -1 ,__A : bool = False ,__A : Optional[TensorType] = None ,) -> Mapping[str, Any]:
_lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__A ,__A ,__A ,__A ,__A )
# Generate decoder inputs
_lowercase = seq_length if not self.use_past else 1
_lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__A ,__A ,__A ,__A ,__A )
_lowercase = {F"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
_lowercase = dict(**__A ,**__A )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
_lowercase , _lowercase = common_inputs['input_ids'].shape
_lowercase = common_inputs['decoder_input_ids'].shape[1]
_lowercase , _lowercase = self.num_attention_heads
_lowercase = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_lowercase = decoder_seq_length + 3
_lowercase = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
_lowercase = torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(__A ,__A )] ,dim=1 )
_lowercase = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
_lowercase , _lowercase = self.num_layers
_lowercase = min(__A ,__A )
_lowercase = max(__A ,__A ) - min_num_layers
_lowercase = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(__A ):
common_inputs["past_key_values"].append(
(
torch.zeros(__A ),
torch.zeros(__A ),
torch.zeros(__A ),
torch.zeros(__A ),
) )
# TODO: test this.
_lowercase = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(__A ,__A ):
common_inputs["past_key_values"].append((torch.zeros(__A ), torch.zeros(__A )) )
return common_inputs
def __UpperCAmelCase ( self : List[Any] ,__A : PreTrainedTokenizer ,__A : int = -1 ,__A : int = -1 ,__A : bool = False ,__A : Optional[TensorType] = None ,) -> Mapping[str, Any]:
_lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__A ,__A ,__A ,__A ,__A )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
_lowercase , _lowercase = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
_lowercase = seqlen + 2
_lowercase , _lowercase = self.num_layers
_lowercase , _lowercase = self.num_attention_heads
_lowercase = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_lowercase = common_inputs['attention_mask'].dtype
_lowercase = torch.cat(
[common_inputs['attention_mask'], torch.ones(__A ,__A ,dtype=__A )] ,dim=1 )
_lowercase = [
(torch.zeros(__A ), torch.zeros(__A )) for _ in range(__A )
]
return common_inputs
def __UpperCAmelCase ( self : Any ,__A : PreTrainedTokenizer ,__A : int = -1 ,__A : int = -1 ,__A : bool = False ,__A : Optional[TensorType] = None ,) -> Mapping[str, Any]:
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_lowercase = compute_effective_axis_dimension(
__A ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_lowercase = tokenizer.num_special_tokens_to_add(__A )
_lowercase = compute_effective_axis_dimension(
__A ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=__A )
# Generate dummy inputs according to compute batch and sequence
_lowercase = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
_lowercase = dict(tokenizer(__A ,return_tensors=__A ) )
return common_inputs
def __UpperCAmelCase ( self : Dict ,__A : PreTrainedTokenizer ,__A : int = -1 ,__A : int = -1 ,__A : bool = False ,__A : Optional[TensorType] = None ,) -> Mapping[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
_lowercase = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__A ,batch_size=__A ,seq_length=__A ,is_pair=__A ,framework=__A )
elif self.task == "causal-lm":
_lowercase = self._generate_dummy_inputs_for_causal_lm(
__A ,batch_size=__A ,seq_length=__A ,is_pair=__A ,framework=__A )
else:
_lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__A ,batch_size=__A ,seq_length=__A ,is_pair=__A ,framework=__A )
return common_inputs
def __UpperCAmelCase ( self : List[str] ,__A : Dict ,__A : Any ,__A : List[Any] ,__A : Tuple ) -> Union[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
_lowercase = super()._flatten_past_key_values_(__A ,__A ,__A ,__A )
else:
_lowercase = super(__A ,self )._flatten_past_key_values_(
__A ,__A ,__A ,__A )
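# ---------------------------------------------------------------------------
# Illustrative sketch (added; not part of the original config module), with
# toy values. The dummy past_key_values built above use the canonical cache
# layout (batch, num_heads, seq_len, head_dim) with head_dim = d_model //
# num_heads, and the decoder's past length is padded by 3 extra positions:
batch, d_model, num_heads = 2, 512, 8
decoder_seq_length = 1                      # seq_length collapses to 1 with past
decoder_past_length = decoder_seq_length + 3
decoder_shape = (batch, num_heads, decoder_past_length, d_model // num_heads)
print(decoder_shape)                        # (2, 8, 4, 64)
# ---------------------------------------------------------------------------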
| 67
| 0
|
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
_UpperCAmelCase : str = logging.get_logger(__name__)
class a__ ( __A ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] = ['pixel_values']
    def __init__(self , __lowercase = True , __lowercase = None , __lowercase = PILImageResampling.BILINEAR , __lowercase = True , __lowercase = 1 / 255 , __lowercase = True , __lowercase = None , __lowercase = True , **__lowercase , ):
super().__init__(**__lowercase )
        __lowerCAmelCase = size if size is not None else {'''shortest_edge''': 224}
__lowerCAmelCase = get_size_dict(__lowercase , default_to_square=__lowercase )
        __lowerCAmelCase = crop_size if crop_size is not None else {'''height''': 256, '''width''': 256}
__lowerCAmelCase = get_size_dict(__lowercase , param_name='''crop_size''' )
__lowerCAmelCase = do_resize
__lowerCAmelCase = size
__lowerCAmelCase = resample
__lowerCAmelCase = do_rescale
__lowerCAmelCase = rescale_factor
__lowerCAmelCase = do_center_crop
__lowerCAmelCase = crop_size
__lowerCAmelCase = do_flip_channel_order
def _snake_case (self , __lowercase , __lowercase , __lowercase = PIL.Image.BILINEAR , __lowercase = None , **__lowercase , ):
__lowerCAmelCase = get_size_dict(__lowercase , default_to_square=__lowercase )
if "shortest_edge" not in size:
raise ValueError(F"""The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}""" )
__lowerCAmelCase = get_resize_output_image_size(__lowercase , size=size['''shortest_edge'''] , default_to_square=__lowercase )
return resize(__lowercase , size=__lowercase , resample=__lowercase , data_format=__lowercase , **__lowercase )
def _snake_case (self , __lowercase , __lowercase , __lowercase = None , **__lowercase , ):
__lowerCAmelCase = get_size_dict(__lowercase )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
return center_crop(__lowercase , size=(size['''height'''], size['''width''']) , data_format=__lowercase , **__lowercase )
def _snake_case (self , __lowercase , __lowercase , __lowercase = None , **__lowercase , ):
return rescale(__lowercase , scale=__lowercase , data_format=__lowercase , **__lowercase )
def _snake_case (self , __lowercase , __lowercase = None ):
return flip_channel_order(__lowercase , data_format=__lowercase )
def _snake_case (self , __lowercase , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = ChannelDimension.FIRST , **__lowercase , ):
__lowerCAmelCase = do_resize if do_resize is not None else self.do_resize
__lowerCAmelCase = resample if resample is not None else self.resample
__lowerCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
__lowerCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowerCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
__lowerCAmelCase = (
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
__lowerCAmelCase = size if size is not None else self.size
__lowerCAmelCase = get_size_dict(__lowercase , default_to_square=__lowercase )
__lowerCAmelCase = crop_size if crop_size is not None else self.crop_size
__lowerCAmelCase = get_size_dict(__lowercase , param_name='''crop_size''' )
__lowerCAmelCase = make_list_of_images(__lowercase )
if not valid_images(__lowercase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
# All transformations expect numpy arrays.
__lowerCAmelCase = [to_numpy_array(__lowercase ) for image in images]
if do_resize:
__lowerCAmelCase = [self.resize(image=__lowercase , size=__lowercase , resample=__lowercase ) for image in images]
if do_center_crop:
__lowerCAmelCase = [self.center_crop(image=__lowercase , size=__lowercase ) for image in images]
if do_rescale:
__lowerCAmelCase = [self.rescale(image=__lowercase , scale=__lowercase ) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
__lowerCAmelCase = [self.flip_channel_order(image=__lowercase ) for image in images]
__lowerCAmelCase = [to_channel_dimension_format(__lowercase , __lowercase ) for image in images]
__lowerCAmelCase = {'''pixel_values''': images}
return BatchFeature(data=__lowercase , tensor_type=__lowercase )
def _snake_case (self , __lowercase , __lowercase = None ):
__lowerCAmelCase = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(__lowercase ) != len(__lowercase ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(__lowercase ):
__lowerCAmelCase = target_sizes.numpy()
__lowerCAmelCase = []
for idx in range(len(__lowercase ) ):
__lowerCAmelCase = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=__lowercase )
__lowerCAmelCase = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(__lowercase )
else:
__lowerCAmelCase = logits.argmax(dim=1 )
__lowerCAmelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
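# Added illustration (not part of the original file): a self-contained sketch of
# the post-processing performed above, assuming torch is installed. Logits of
# shape (batch, num_labels, height, width) are upsampled to each requested
# target size and reduced to per-pixel class indices with argmax.
def _demo_post_process_semantic_segmentation():
    logits = torch.randn(2 , 21 , 32 , 32 )  # (batch, num_labels, h, w)
    target_sizes = [(64, 64), (48, 80)]
    semantic_maps = []
    for idx, size in enumerate(target_sizes ):
        resized = torch.nn.functional.interpolate(
            logits[idx].unsqueeze(dim=0 ) , size=size , mode='''bilinear''' , align_corners=False )
        semantic_maps.append(resized[0].argmax(dim=0 ) )  # (height, width) class map
    return semantic_maps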
| 474
|
'''simple docstring'''
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
"""text_branch""": """text_model""",
"""audio_branch""": """audio_model.audio_encoder""",
"""attn""": """attention.self""",
"""self.proj""": """output.dense""",
"""attention.self_mask""": """attn_mask""",
"""mlp.fc1""": """intermediate.dense""",
"""mlp.fc2""": """output.dense""",
"""norm1""": """layernorm_before""",
"""norm2""": """layernorm_after""",
"""bn0""": """batch_norm""",
}
processor = AutoFeatureExtractor.from_pretrained("""laion/clap-htsat-unfused""", truncation="""rand_trunc""")
def init_clap( checkpoint_path, enable_fusion=False):
    model , model_cfg = create_model(
        '''HTSAT-tiny''', '''roberta''', checkpoint_path, precision='''fp32''', device='''cuda:0''' if torch.cuda.is_available() else '''cpu''', enable_fusion=enable_fusion, fusion_type='''aff_2d''' if enable_fusion else None, )
    return model, model_cfg
def rename_state_dict( state_dict):
    model_state_dict = {}
    sequential_layers_pattern = r'''.*sequential.(\d+).*'''
    text_projection_pattern = r'''.*_projection.(\d+).*'''
    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)
        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(F"""sequential.{sequential_layer}.""", F"""layers.{int(sequential_layer)//3}.linear.""")
        elif re.match(text_projection_pattern, key):
            projection_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projection_layer == 0 else 2
            key = key.replace(F"""_projection.{projection_layer}.""", F"""_projection.linear{transformers_projection_layer}.""")
        if "audio" in key and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3
            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]
            model_state_dict[key.replace('''qkv''', '''query''')] = query_layer
            model_state_dict[key.replace('''qkv''', '''key''')] = key_layer
            model_state_dict[key.replace('''qkv''', '''value''')] = value_layer
        else:
            model_state_dict[key] = value
    return model_state_dict
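# Added illustration (not part of the original script): a minimal, self-contained
# check of the fused-qkv split performed above, using a toy tensor whose first
# dimension is 3 * dim.
def _demo_split_qkv():
    mixed_qkv = torch.arange(12.0 ).reshape(6 , 2 )  # toy fused projection, dim == 2
    qkv_dim = mixed_qkv.size(0 ) // 3
    query_layer = mixed_qkv[:qkv_dim]
    key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
    value_layer = mixed_qkv[qkv_dim * 2 :]
    assert query_layer.shape == key_layer.shape == value_layer.shape == (2, 2)
    return query_layer, key_layer, value_layer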
def convert_clap_checkpoint( checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model , model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)
    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)
    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)
    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)
    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_UpperCAmelCase : Any = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument("""--enable_fusion""", action="""store_true""", help="""Whether to enable fusion or not""")
_UpperCAmelCase : Optional[int] = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 474
| 1
|
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __snake_case ( PipelineTesterMixin ,IFPipelineTesterMixin ,unittest.TestCase):
    """simple docstring"""
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'original_image'})
    required_optional_params = PipelineTesterMixin.required_optional_params - {'latents'}
    def get_dummy_components( self ):
        return self._get_superresolution_dummy_components()
    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        image = floats_tensor((1, 3, 16, 16) , rng=random.Random(seed ) ).to(device )
        original_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        mask_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        inputs = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """image""": image,
            """original_image""": original_image,
            """mask_image""": mask_image,
            """generator""": generator,
            """num_inference_steps""": 2,
            """output_type""": """numpy""",
        }
        return inputs
    @unittest.skipIf(
        torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
    def test_xformers_attention_forwardGenerator_pass( self ):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
    def test_save_load_optional_components( self ):
        self._test_save_load_optional_components()
    @unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
    def test_save_load_float16( self ):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1E-1 )
    def test_attention_slicing_forward_pass( self ):
        self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
    def test_save_load_local( self ):
        self._test_save_load_local()
    def test_inference_batch_single_identical( self ):
        self._test_inference_batch_single_identical(
            expected_max_diff=1E-2 , )
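# Added illustration (not part of the original test): the device-aware seeding
# pattern used by `get_dummy_inputs` above. MPS only supports CPU generators,
# so seeding falls back to `torch.manual_seed` there.
def _demo_seeded_generator(device , seed=0 ):
    if str(device ).startswith("""mps""" ):
        return torch.manual_seed(seed )
    return torch.Generator(device=device ).manual_seed(seed )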
| 275
|
'''simple docstring'''
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10
def lin_search( left : int , right : int , array : list[int] , target : int ):
    '''simple docstring'''
    for i in range(left , right + 1 ):
        if array[i] == target:
            return i
    return -1
def ite_ternary_search( array : list[int] , target : int ):
    '''simple docstring'''
    left = 0
    right = len(array ) - 1
    while left <= right:
        if right - left < precision:
            return lin_search(left , right , array , target )
        one_third = left + (right - left ) // 3
        two_third = right - (right - left ) // 3
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    return -1
def rec_ternary_search( left : int , right : int , array : list[int] , target : int ):
    '''simple docstring'''
    if left <= right:
        if right - left < precision:
            return lin_search(left , right , array , target )
        one_third = left + (right - left ) // 3
        two_third = right - (right - left ) // 3
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left , one_third - 1 , array , target )
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1 , right , array , target )
        else:
            return rec_ternary_search(one_third + 1 , two_third - 1 , array , target )
    else:
        return -1
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), F"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result1 != -1:
        print(F'''Iterative search: {target} found at positions: {result1}''')
        print(F'''Recursive search: {target} found at positions: {result2}''')
    else:
        print("Not found")
| 275
| 1
|
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def try_infer_format_from_ext( path ):
    '''simple docstring'''
    if not path:
        return "pipe"
    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext ):
            return ext
    raise Exception(
        F'Unable to determine file format from file extension {path}. '
        F'Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}' )
def run_command_factory( args ):
    '''simple docstring'''
    nlp = pipeline(
        task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
    format = try_infer_format_from_ext(args.input ) if args.format == '''infer''' else args.format
    reader = PipelineDataFormat.from_str(
        format=format , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , )
    return RunCommand(nlp , reader )
class RunCommand ( BaseTransformersCLICommand ):
    '''simple docstring'''
    def __init__( self , nlp : Pipeline , reader : PipelineDataFormat ):
        """simple docstring"""
        self._nlp = nlp
        self._reader = reader
    @staticmethod
    def register_subcommand( parser ):
        """simple docstring"""
        run_parser = parser.add_parser('''run''' , help='''Run a pipeline through the CLI''' )
        run_parser.add_argument('''--task''' , choices=get_supported_tasks() , help='''Task to run''' )
        run_parser.add_argument('''--input''' , type=str , help='''Path to the file to use for inference''' )
        run_parser.add_argument('''--output''' , type=str , help='''Path to the file that will be used post to write results.''' )
        run_parser.add_argument('''--model''' , type=str , help='''Name or path to the model to instantiate.''' )
        run_parser.add_argument('''--config''' , type=str , help='''Name or path to the model\'s config to instantiate.''' )
        run_parser.add_argument(
            '''--tokenizer''' , type=str , help='''Name of the tokenizer to use. (default: same as the model name)''' )
        run_parser.add_argument(
            '''--column''' , type=str , help='''Name of the column to use as input. (For multi columns input as QA use column1,columns2)''' , )
        run_parser.add_argument(
            '''--format''' , type=str , default='''infer''' , choices=PipelineDataFormat.SUPPORTED_FORMATS , help='''Input format to read from''' , )
        run_parser.add_argument(
            '''--device''' , type=int , default=-1 , help='''Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)''' , )
        run_parser.add_argument('''--overwrite''' , action='''store_true''' , help='''Allow overwriting the output file.''' )
        run_parser.set_defaults(func=run_command_factory )
    def run( self ):
        """simple docstring"""
        nlp , outputs = self._nlp, []
        for entry in self._reader:
            output = nlp(**entry ) if self._reader.is_multi_columns else nlp(entry )
            if isinstance(output , dict ):
                outputs.append(output )
            else:
                outputs += output
        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs )
            logger.warning(F'Current pipeline requires output to be in binary format, saving at {binary_path}' )
        else:
            self._reader.save(outputs )
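# Added illustration (not part of the original file): a sketch of driving the
# factory above programmatically instead of through `transformers-cli run`.
# The argument values are placeholders; building the command instantiates a
# real pipeline, so this is intended as a shape reference only.
def _demo_build_run_command():
    demo_parser = ArgumentParser('''transformers CLI tool''' , usage='''transformers-cli <command> [<args>]''' )
    commands_parser = demo_parser.add_subparsers(help='''transformers-cli command helpers''' )
    RunCommand.register_subcommand(commands_parser )
    demo_args = demo_parser.parse_args(
        ['''run''', '''--task''', '''text-classification''', '''--input''', '''data.csv''', '''--format''', '''infer'''] )
    return demo_args.func(demo_args )  # a RunCommand ready for .run()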
| 638
|
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
    def setUp( self ):
        """simple docstring"""
        vocab = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split()
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        self.add_kwargs_tokens_map = {
            '''unk_token''': '''<unk>''',
            '''bos_token''': '''<s>''',
            '''eos_token''': '''</s>''',
        }
        feature_extractor_map = {
            '''feature_size''': 1,
            '''padding_value''': 0.0,
            '''sampling_rate''': 1_60_00,
            '''return_attention_mask''': False,
            '''do_normalize''': True,
        }
        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.feature_extraction_file = os.path.join(self.tmpdirname , FEATURE_EXTRACTOR_NAME )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '''\n''' )
        with open(self.feature_extraction_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(feature_extractor_map ) + '''\n''' )
        # load decoder from hub
        self.decoder_name = '''hf-internal-testing/ngram-beam-search-decoder'''
    def get_tokenizer( self , **kwargs_init ):
        """simple docstring"""
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init )
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_feature_extractor( self , **kwargs ):
        """simple docstring"""
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **kwargs )
    def get_decoder( self , **kwargs ):
        """simple docstring"""
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **kwargs )
    def tearDown( self ):
        """simple docstring"""
        shutil.rmtree(self.tmpdirname )
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
processor.save_pretrained(self.tmpdirname )
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , A_ )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , A_ )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , A_ )
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['''xx'''] )
with self.assertRaisesRegex(A_ , '''include''' ):
WavaVecaProcessorWithLM(
tokenizer=A_ , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCamelCase = floats_list((3, 10_00) )
_lowerCamelCase = feature_extractor(A_ , return_tensors='''np''' )
_lowerCamelCase = processor(A_ , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCamelCase = '''This is a test string'''
_lowerCamelCase = processor(text=A_ )
_lowerCamelCase = tokenizer(A_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def _get_dummy_logits( self , shape=(2, 10, 16) , seed=77 ):
        """simple docstring"""
        np.random.seed(seed )
        return np.random.rand(*shape )
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCamelCase = self._get_dummy_logits(shape=(10, 16) , seed=13 )
_lowerCamelCase = processor.decode(A_ )
_lowerCamelCase = decoder.decode_beams(A_ )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('''</s> <s> </s>''' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
def UpperCamelCase_ ( self , A_ ) -> int:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCamelCase = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
_lowerCamelCase = processor.batch_decode(A_ )
else:
with get_context(A_ ).Pool() as pool:
_lowerCamelCase = processor.batch_decode(A_ , A_ )
_lowerCamelCase = list(A_ )
with get_context('''fork''' ).Pool() as p:
_lowerCamelCase = decoder.decode_beams_batch(A_ , A_ )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(A_ , decoded_processor.text )
self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] , decoded_processor.text )
self.assertListEqual(A_ , decoded_processor.logit_score )
self.assertListEqual(A_ , decoded_processor.lm_score )
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCamelCase = self._get_dummy_logits()
_lowerCamelCase = 15
_lowerCamelCase = -20.0
_lowerCamelCase = -4.0
_lowerCamelCase = processor.batch_decode(
A_ , beam_width=A_ , beam_prune_logp=A_ , token_min_logp=A_ , )
_lowerCamelCase = decoded_processor_out.text
_lowerCamelCase = list(A_ )
with get_context('''fork''' ).Pool() as pool:
_lowerCamelCase = decoder.decode_beams_batch(
A_ , A_ , beam_width=A_ , beam_prune_logp=A_ , token_min_logp=A_ , )
_lowerCamelCase = [d[0][0] for d in decoded_decoder_out]
_lowerCamelCase = [d[0][2] for d in decoded_decoder_out]
_lowerCamelCase = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(A_ , A_ )
self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] , A_ )
self.assertTrue(np.array_equal(A_ , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , A_ , atol=1E-3 ) )
self.assertTrue(np.array_equal(A_ , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , A_ , atol=1E-3 ) )
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCamelCase = self._get_dummy_logits()
_lowerCamelCase = 2.0
_lowerCamelCase = 5.0
_lowerCamelCase = -20.0
_lowerCamelCase = True
_lowerCamelCase = processor.batch_decode(
A_ , alpha=A_ , beta=A_ , unk_score_offset=A_ , lm_score_boundary=A_ , )
_lowerCamelCase = decoded_processor_out.text
_lowerCamelCase = list(A_ )
decoder.reset_params(
alpha=A_ , beta=A_ , unk_score_offset=A_ , lm_score_boundary=A_ , )
with get_context('''fork''' ).Pool() as pool:
_lowerCamelCase = decoder.decode_beams_batch(
A_ , A_ , )
_lowerCamelCase = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(A_ , A_ )
self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] , A_ )
_lowerCamelCase = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , A_ )
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase = processor.decoder.model_container[processor.decoder._model_key]
_lowerCamelCase = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
_lowerCamelCase = os.listdir(A_ )
_lowerCamelCase = ['''alphabet.json''', '''language_model''']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(A_ , A_ )
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
_lowerCamelCase = snapshot_download('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained(A_ )
_lowerCamelCase = processor.decoder.model_container[processor.decoder._model_key]
_lowerCamelCase = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
_lowerCamelCase = os.listdir(A_ )
_lowerCamelCase = os.listdir(A_ )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(A_ , A_ )
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase = floats_list((3, 10_00) )
_lowerCamelCase = processor_wavaveca(A_ , return_tensors='''np''' )
_lowerCamelCase = processor_auto(A_ , return_tensors='''np''' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
_lowerCamelCase = self._get_dummy_logits()
_lowerCamelCase = processor_wavaveca.batch_decode(A_ )
_lowerCamelCase = processor_auto.batch_decode(A_ )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
_lowerCamelCase = self.get_feature_extractor()
_lowerCamelCase = self.get_tokenizer()
_lowerCamelCase = self.get_decoder()
_lowerCamelCase = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , )
    @staticmethod
    def get_from_offsets( offsets , key ):
        """simple docstring"""
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase = self._get_dummy_logits()[0]
_lowerCamelCase = processor.decode(A_ , output_word_offsets=A_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(A_ , A_ ) )
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''end_offset''' ) , [1, 3, 5] )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
_lowerCamelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase = self._get_dummy_logits()
_lowerCamelCase = processor.batch_decode(A_ , output_word_offsets=A_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(A_ , A_ ) )
self.assertListEqual(
[''' '''.join(self.get_from_offsets(A_ , '''word''' ) ) for o in outputs['''word_offsets''']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''end_offset''' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
import torch
_lowerCamelCase = load_dataset('''common_voice''' , '''en''' , split='''train''' , streaming=A_ )
_lowerCamelCase = ds.cast_column('''audio''' , datasets.Audio(sampling_rate=1_60_00 ) )
_lowerCamelCase = iter(A_ )
_lowerCamelCase = next(A_ )
_lowerCamelCase = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
_lowerCamelCase = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
_lowerCamelCase = processor(sample['''audio''']['''array'''] , return_tensors='''pt''' ).input_values
with torch.no_grad():
_lowerCamelCase = model(A_ ).logits.cpu().numpy()
_lowerCamelCase = processor.decode(logits[0] , output_word_offsets=A_ )
_lowerCamelCase = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
_lowerCamelCase = [
{
'''start_time''': d['''start_offset'''] * time_offset,
'''end_time''': d['''end_offset'''] * time_offset,
'''word''': d['''word'''],
}
for d in output['''word_offsets''']
]
_lowerCamelCase = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
# output words
self.assertEqual(''' '''.join(self.get_from_offsets(A_ , '''word''' ) ) , A_ )
self.assertEqual(''' '''.join(self.get_from_offsets(A_ , '''word''' ) ) , output.text )
# output times
_lowerCamelCase = torch.tensor(self.get_from_offsets(A_ , '''start_time''' ) )
_lowerCamelCase = torch.tensor(self.get_from_offsets(A_ , '''end_time''' ) )
# fmt: off
_lowerCamelCase = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
_lowerCamelCase = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(A_ , A_ , atol=0.01 ) )
self.assertTrue(torch.allclose(A_ , A_ , atol=0.01 ) )
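# Added illustration (not part of the original tests): a standalone sketch of the
# offset-to-seconds conversion exercised in the slow test above. The ratio of
# input samples to logit frames divided by the sampling rate gives seconds per
# CTC frame; 320 and 16_000 are example values, not constants from this file.
def _demo_offsets_to_seconds():
    word_offsets = [{'''word''': '''hello''', '''start_offset''': 10, '''end_offset''': 24}]
    inputs_to_logits_ratio, sampling_rate = 320, 16_000
    time_offset = inputs_to_logits_ratio / sampling_rate  # seconds per frame
    return [
        {
            '''word''': d['''word'''],
            '''start_time''': d['''start_offset'''] * time_offset,
            '''end_time''': d['''end_offset'''] * time_offset,
        }
        for d in word_offsets
    ]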
| 638
| 1
|
def solution( length : int = 50 ):
    """simple docstring"""
    ways_number = [1] * (length + 1)
    for row_length in range(length + 1 ):
        for tile_length in range(2 , 5 ):
            for tile_start in range(row_length - tile_length + 1 ):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]
    return ways_number[length]
if __name__ == "__main__":
    print(F'''{solution() = }''')
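# Added cross-check (not part of the original file): a direct recursive count of
# the same tilings. Each position is either left uncovered or starts a tile of
# length 2, 3 or 4, which is exactly the recurrence the table above accumulates.
def _brute_force_count( length ):
    if length < 0:
        return 0
    if length == 0:
        return 1
    total = _brute_force_count(length - 1 )  # leave the first unit uncovered
    for tile_length in range(2 , 5 ):
        total += _brute_force_count(length - tile_length )
    return total
def _check_against_brute_force():
    assert all(solution(n ) == _brute_force_count(n ) for n in range(10 ) )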
| 386
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''',
}
class snake_case ( PretrainedConfig ):
    model_type = '''roc_bert'''
    def __init__( self , vocab_size=3_0_5_2_2 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=2 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , use_cache=True , pad_token_id=0 , position_embedding_type="absolute" , classifier_dropout=None , enable_pronunciation=True , enable_shape=True , pronunciation_embed_dim=7_6_8 , pronunciation_vocab_size=9_1_0 , shape_embed_dim=5_1_2 , shape_vocab_size=2_4_8_5_8 , concat_input=True , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id , **kwargs )
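# Added illustration (not part of the original file): instantiating the config
# class above (kept under its dataset-obfuscated name `snake_case`) and reading
# back a couple of the RoC-BERT-specific defaults it sets.
def _demo_roc_bert_config():
    config = snake_case(hidden_size=1_28 , num_attention_heads=4 )
    return config.enable_pronunciation, config.shape_vocab_size  # (True, 24858)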
| 674
| 0
|
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow :
def __init__( self: Tuple , UpperCAmelCase_: str = "cpu" , UpperCAmelCase_: str = "openai/clip-vit-large-patch14" ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = device
_SCREAMING_SNAKE_CASE = CLIPTokenizerFast.from_pretrained(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73]
_SCREAMING_SNAKE_CASE = [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11]
_SCREAMING_SNAKE_CASE = torchvision.transforms.Normalize(self.image_mean , self.image_std )
_SCREAMING_SNAKE_CASE = torchvision.transforms.Resize(224 )
_SCREAMING_SNAKE_CASE = torchvision.transforms.CenterCrop(224 )
def UpperCamelCase ( self: Optional[int] , UpperCAmelCase_: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.resize(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = self.center_crop(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = self.normalize(UpperCAmelCase_ )
return images
def __call__( self: Tuple , UpperCAmelCase_: str=None , UpperCAmelCase_: Dict=None , **UpperCAmelCase_: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.tokenizer(text=UpperCAmelCase_ , **UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = self.preprocess_img(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class VQGAN_CLIP (nn.Module ):
def __init__( self: Union[str, Any] , UpperCAmelCase_: Dict=10 , UpperCAmelCase_: List[Any]=0.01 , UpperCAmelCase_: int=None , UpperCAmelCase_: List[Any]=None , UpperCAmelCase_: Optional[int]=None , UpperCAmelCase_: Optional[int]=None , UpperCAmelCase_: List[str]=None , UpperCAmelCase_: Any=None , UpperCAmelCase_: int=False , UpperCAmelCase_: Tuple=True , UpperCAmelCase_: Tuple="image" , UpperCAmelCase_: Any=True , UpperCAmelCase_: Optional[int]=False , UpperCAmelCase_: Union[str, Any]=False , UpperCAmelCase_: Union[str, Any]=False , ):
'''simple docstring'''
super().__init__()
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = device if device else get_device()
if vqgan:
_SCREAMING_SNAKE_CASE = vqgan
else:
_SCREAMING_SNAKE_CASE = load_vqgan(self.device , conf_path=UpperCAmelCase_ , ckpt_path=UpperCAmelCase_ )
self.vqgan.eval()
if clip:
_SCREAMING_SNAKE_CASE = clip
else:
_SCREAMING_SNAKE_CASE = CLIPModel.from_pretrained("""openai/clip-vit-base-patch32""" )
self.clip.to(self.device )
_SCREAMING_SNAKE_CASE = ProcessorGradientFlow(device=self.device )
_SCREAMING_SNAKE_CASE = iterations
_SCREAMING_SNAKE_CASE = lr
_SCREAMING_SNAKE_CASE = log
_SCREAMING_SNAKE_CASE = make_grid
_SCREAMING_SNAKE_CASE = return_val
_SCREAMING_SNAKE_CASE = quantize
_SCREAMING_SNAKE_CASE = self.vqgan.decoder.z_shape
def UpperCamelCase ( self: Dict , UpperCAmelCase_: List[str]=None , UpperCAmelCase_: Union[str, Any]=None , UpperCAmelCase_: Optional[int]=5 , UpperCAmelCase_: Dict=True ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = []
if output_path is None:
_SCREAMING_SNAKE_CASE = """./animation.gif"""
if input_path is None:
_SCREAMING_SNAKE_CASE = self.save_path
_SCREAMING_SNAKE_CASE = sorted(glob(input_path + """/*""" ) )
if not len(UpperCAmelCase_ ):
raise ValueError(
"""No images found in save path, aborting (did you pass save_intermediate=True to the generate"""
""" function?)""" )
if len(UpperCAmelCase_ ) == 1:
print("""Only one image found in save path, (did you pass save_intermediate=True to the generate function?)""" )
_SCREAMING_SNAKE_CASE = total_duration / len(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = [frame_duration] * len(UpperCAmelCase_ )
if extend_frames:
_SCREAMING_SNAKE_CASE = 1.5
_SCREAMING_SNAKE_CASE = 3
for file_name in paths:
if file_name.endswith(""".png""" ):
images.append(imageio.imread(UpperCAmelCase_ ) )
imageio.mimsave(UpperCAmelCase_ , UpperCAmelCase_ , duration=UpperCAmelCase_ )
print(F'gif saved to {output_path}' )
def UpperCamelCase ( self: int , UpperCAmelCase_: Any=None , UpperCAmelCase_: Union[str, Any]=None ):
'''simple docstring'''
if not (path or img):
raise ValueError("""Input either path or tensor""" )
if img is not None:
raise NotImplementedError
_SCREAMING_SNAKE_CASE = preprocess(Image.open(UpperCAmelCase_ ) , target_image_size=256 ).to(self.device )
_SCREAMING_SNAKE_CASE = preprocess_vqgan(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE = self.vqgan.encode(UpperCAmelCase_ )
return z
def UpperCamelCase ( self: str , UpperCAmelCase_: int ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.latent.detach().requires_grad_()
_SCREAMING_SNAKE_CASE = base_latent + transform_vector
if self.quantize:
_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE = self.vqgan.quantize(UpperCAmelCase_ )
else:
_SCREAMING_SNAKE_CASE = trans_latent
return self.vqgan.decode(UpperCAmelCase_ )
def UpperCamelCase ( self: int , UpperCAmelCase_: int , UpperCAmelCase_: Union[str, Any] , UpperCAmelCase_: List[str]=None ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.clip_preprocessor(text=UpperCAmelCase_ , images=UpperCAmelCase_ , return_tensors="""pt""" , padding=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = self.clip(**UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = clip_outputs.logits_per_image
if weights is not None:
_SCREAMING_SNAKE_CASE = similarity_logits * weights
return similarity_logits.sum()
def UpperCamelCase ( self: str , UpperCAmelCase_: List[str] , UpperCAmelCase_: Union[str, Any] , UpperCAmelCase_: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self._get_clip_similarity(pos_prompts["""prompts"""] , UpperCAmelCase_ , weights=(1 / pos_prompts["""weights"""]) )
if neg_prompts:
_SCREAMING_SNAKE_CASE = self._get_clip_similarity(neg_prompts["""prompts"""] , UpperCAmelCase_ , weights=neg_prompts["""weights"""] )
else:
_SCREAMING_SNAKE_CASE = torch.tensor([1] , device=self.device )
_SCREAMING_SNAKE_CASE = -torch.log(UpperCAmelCase_ ) + torch.log(UpperCAmelCase_ )
return loss
def UpperCamelCase ( self: List[Any] , UpperCAmelCase_: str , UpperCAmelCase_: List[Any] , UpperCAmelCase_: Optional[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = torch.randn_like(self.latent , requires_grad=UpperCAmelCase_ , device=self.device )
_SCREAMING_SNAKE_CASE = torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
_SCREAMING_SNAKE_CASE = self._add_vector(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = loop_post_process(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = self._get_CLIP_loss(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
print("""CLIP loss""" , UpperCAmelCase_ )
if self.log:
wandb.log({"""CLIP Loss""": clip_loss} )
clip_loss.backward(retain_graph=UpperCAmelCase_ )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def UpperCamelCase ( self: Dict , UpperCAmelCase_: List[Any] , UpperCAmelCase_: Tuple , UpperCAmelCase_: Optional[int] ):
'''simple docstring'''
wandb.init(reinit=UpperCAmelCase_ , project="""face-editor""" )
wandb.config.update({"""Positive Prompts""": positive_prompts} )
wandb.config.update({"""Negative Prompts""": negative_prompts} )
wandb.config.update({"""lr""": self.lr, """iterations""": self.iterations} )
if image_path:
_SCREAMING_SNAKE_CASE = Image.open(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = image.resize((256, 256) )
wandb.log("""Original Image""" , wandb.Image(UpperCAmelCase_ ) )
def UpperCamelCase ( self: List[Any] , UpperCAmelCase_: Any ):
'''simple docstring'''
if not prompts:
return []
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = []
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
_SCREAMING_SNAKE_CASE = [prompt.strip() for prompt in prompts.split("""|""" )]
for prompt in prompts:
if isinstance(UpperCAmelCase_ , (tuple, list) ):
_SCREAMING_SNAKE_CASE = prompt[0]
_SCREAMING_SNAKE_CASE = float(prompt[1] )
elif ":" in prompt:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = prompt.split(""":""" )
_SCREAMING_SNAKE_CASE = float(UpperCAmelCase_ )
else:
_SCREAMING_SNAKE_CASE = prompt
_SCREAMING_SNAKE_CASE = 1.0
processed_prompts.append(UpperCAmelCase_ )
weights.append(UpperCAmelCase_ )
return {
"prompts": processed_prompts,
"weights": torch.tensor(UpperCAmelCase_ , device=self.device ),
}
def UpperCamelCase ( self: Dict , UpperCAmelCase_: Optional[int] , UpperCAmelCase_: Dict=None , UpperCAmelCase_: Optional[int]=None , UpperCAmelCase_: Union[str, Any]=True , UpperCAmelCase_: List[Any]=False , UpperCAmelCase_: Dict=True , UpperCAmelCase_: List[Any]=True , UpperCAmelCase_: List[str]=None , ):
'''simple docstring'''
if image_path:
_SCREAMING_SNAKE_CASE = self._get_latent(UpperCAmelCase_ )
else:
_SCREAMING_SNAKE_CASE = torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
assert pos_prompts, "You must provide at least one positive prompt."
_SCREAMING_SNAKE_CASE = self.process_prompts(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = self.process_prompts(UpperCAmelCase_ )
if save_final and save_path is None:
_SCREAMING_SNAKE_CASE = os.path.join("""./outputs/""" , """_""".join(pos_prompts["""prompts"""] ) )
if not os.path.exists(UpperCAmelCase_ ):
os.makedirs(UpperCAmelCase_ )
else:
_SCREAMING_SNAKE_CASE = save_path + """_""" + get_timestamp()
os.makedirs(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = save_path
_SCREAMING_SNAKE_CASE = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print("""Original Image""" )
show_pil(custom_to_pil(UpperCAmelCase_ ) )
_SCREAMING_SNAKE_CASE = loop_post_process(UpperCAmelCase_ )
for iter, transformed_img in enumerate(self._optimize_CLIP(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) ):
if show_intermediate:
show_pil(UpperCAmelCase_ )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , F'iter_{iter:03d}.png' ) )
if self.log:
wandb.log({"""Image""": wandb.Image(UpperCAmelCase_ )} )
if show_final:
show_pil(UpperCAmelCase_ )
if save_final:
transformed_img.save(os.path.join(self.save_path , F'iter_{iter:03d}_final.png' ) )
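# Added illustration (not part of the original file): a standalone sketch of the
# log-ratio loss computed in `_get_CLIP_loss` above, with scalar stand-ins for
# the summed positive- and negative-prompt similarity logits.
def _demo_clip_loss():
    pos_similarity = torch.tensor([8.0] )  # stand-in for positive-prompt logits
    neg_similarity = torch.tensor([2.0] )  # stand-in for negative-prompt logits
    loss = -torch.log(pos_similarity ) + torch.log(neg_similarity )
    return loss  # decreases as positive similarity dominates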
| 569
|
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester (unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , apply_ocr=True , ):
        '''simple docstring'''
        size = size if size is not None else {"""height""": 18, """width""": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr
    def prepare_image_processor_dict( self ):
        '''simple docstring'''
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class __UpperCAmelCase (ImageProcessingSavingTestMixin ,unittest.TestCase ):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None
    def setUp( self ):
        '''simple docstring'''
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase_ , """do_resize""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , """size""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , """apply_ocr""" ) )
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
_SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def UpperCamelCase ( self: int ):
'''simple docstring'''
pass
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , Image.Image )
# Test not batched input
_SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="""pt""" )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
self.assertIsInstance(encoding.words , UpperCAmelCase_ )
self.assertIsInstance(encoding.boxes , UpperCAmelCase_ )
# Test batched
_SCREAMING_SNAKE_CASE = image_processing(UpperCAmelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def UpperCamelCase ( self: int ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , numpify=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , np.ndarray )
# Test not batched input
_SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_SCREAMING_SNAKE_CASE = image_processing(UpperCAmelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , torchify=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , torch.Tensor )
# Test not batched input
_SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_SCREAMING_SNAKE_CASE = image_processing(UpperCAmelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def UpperCamelCase ( self: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = LayoutLMvaImageProcessor()
from datasets import load_dataset
_SCREAMING_SNAKE_CASE = load_dataset("""hf-internal-testing/fixtures_docvqa""" , split="""test""" )
_SCREAMING_SNAKE_CASE = Image.open(ds[0]["""file"""] ).convert("""RGB""" )
_SCREAMING_SNAKE_CASE = image_processing(UpperCAmelCase_ , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
_SCREAMING_SNAKE_CASE = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
_SCREAMING_SNAKE_CASE = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 
803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , UpperCAmelCase_ )
self.assertListEqual(encoding.boxes , UpperCAmelCase_ )
        # with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)
        encoding = image_processing(image, return_tensors="pt")
        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
| 569
| 1
|
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution() -> int:
    script_directory = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_directory, "words.txt")

    with open(words_file_path) as f:
        words = f.readline()

    # Strip the surrounding quotes, compute each word's alphabetical value,
    # and keep only the values that are triangular numbers.
    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
if __name__ == "__main__":
print(solution())
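# Worked example (added for illustration, not part of the original solution):
# the word value of "SKY" is 19 + 11 + 25 = 55, the tenth triangular number,
# so "SKY" would be counted by solution().
# >>> sum(ord(x) - 64 for x in "SKY") in TRIANGULAR_NUMBERS
# True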
| 194
|
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot_small-90M": 512}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    pairs = set(pairs)
    return pairs
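# Example (illustrative, not in the original module):
# get_pairs(("h", "e", "l", "l", "o</w>")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o</w>")}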
class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    """Constructs a Blenderbot-small tokenizer based on BPE."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="__start__",
        eos_token="__end__",
        unk_token="__unk__",
        pad_token="__null__",
        **kwargs,
    ) -> None:
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")

        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue

            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)

            if not pairs:
                words.append(token)
                continue

            while True:
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0

                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break

                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            word = word[:-4]
            self.cache[token] = word
            words.append(word)
        return " ".join(words)
    def _tokenize(self, text: str) -> List[str]:
        """Split a string into BPE tokens using the merge ranks."""
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
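# Minimal usage sketch (illustrative; the file paths and output pieces are
# placeholders, not part of the original module):
#
#     tokenizer = BlenderbotSmallTokenizer(vocab_file="vocab.json", merges_file="merges.txt")
#     tokens = tokenizer.tokenize("sample text")   # BPE pieces, e.g. ["sam@@", "ple", "text"]
#     ids = tokenizer.convert_tokens_to_ids(tokens)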
| 194
| 1
|
"""simple docstring"""
import gc
import math
import unittest
import torch
from diffusers import UNet2DModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
UpperCamelCase = logging.get_logger(__name__)
enable_full_determinism()
class UNet2DModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": (32, 64),
            "down_block_types": ("DownBlock2D", "AttnDownBlock2D"),
            "up_block_types": ("AttnUpBlock2D", "UpBlock2D"),
            "attention_head_dim": 3,
            "out_channels": 3,
            "in_channels": 3,
            "layers_per_block": 2,
            "sample_size": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
class UNetLDMModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 4
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (4, 32, 32)

    @property
    def output_shape(self):
        return (4, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "sample_size": 32,
            "in_channels": 4,
            "out_channels": 4,
            "layers_per_block": 2,
            "block_out_channels": (32, 64),
            "attention_head_dim": 32,
            "down_block_types": ("DownBlock2D", "DownBlock2D"),
            "up_block_types": ("UpBlock2D", "UpBlock2D"),
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)

        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate(self):
        model, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate_wont_change_results(self):
        model_accelerate, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model_accelerate.to(torch_device)
        model_accelerate.eval()

        noise = torch.randn(
            1,
            model_accelerate.config.in_channels,
            model_accelerate.config.sample_size,
            model_accelerate.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        arr_accelerate = model_accelerate(noise, time_step)["sample"]

        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()

        model_normal_load, _ = UNet2DModel.from_pretrained(
            "fusing/unet-ldm-dummy-update", output_loading_info=True, low_cpu_mem_usage=False
        )
        model_normal_load.to(torch_device)
        model_normal_load.eval()
        arr_normal_load = model_normal_load(noise, time_step)["sample"]

        assert torch_all_close(arr_accelerate, arr_normal_load, rtol=1e-3)

    def test_output_pretrained(self):
        model = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update")
        model.eval()
        model.to(torch_device)

        noise = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-3))
class NCSNppModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [10]).to(dtype=torch.int32, device=torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64, 64, 64],
            "in_channels": 3,
            "layers_per_block": 1,
            "out_channels": 3,
            "time_embedding_type": "fourier",
            "norm_eps": 1e-6,
            "mid_block_scale_factor": math.sqrt(2.0),
            "norm_num_groups": None,
            "down_block_types": [
                "SkipDownBlock2D",
                "AttnSkipDownBlock2D",
                "SkipDownBlock2D",
                "SkipDownBlock2D",
            ],
            "up_block_types": [
                "SkipUpBlock2D",
                "SkipUpBlock2D",
                "AttnSkipUpBlock2D",
                "SkipUpBlock2D",
            ],
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    @slow
    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        inputs = self.dummy_input
        noise = floats_tensor((4, 3) + (256, 256)).to(torch_device)
        inputs["sample"] = noise
        image = model(**inputs)

        assert image is not None, "Make sure output is not None"

    @slow
    def test_output_pretrained_ve_mid(self):
        model = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (256, 256)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -10980.7129, -20028.8535, 8148.2822, 2342.2905, 567.7608])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_output_pretrained_ve_large(self):
        model = UNet2DModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_forward_with_norm_groups(self):
        # not required for this model
        pass
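# Minimal sketch of what these tests exercise (illustrative, not part of the
# test file): build a small UNet2DModel and run one denoising forward pass.
#
#     model = UNet2DModel(
#         sample_size=32, in_channels=3, out_channels=3, layers_per_block=2,
#         block_out_channels=(32, 64),
#         down_block_types=("DownBlock2D", "AttnDownBlock2D"),
#         up_block_types=("AttnUpBlock2D", "UpBlock2D"),
#     )
#     sample = torch.randn(1, 3, 32, 32)
#     out = model(sample, timestep=10).sample  # same shape as `sample`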
| 120
|
"""simple docstring"""
from math import factorial
def solution(n: int = 20) -> int:
    """Return the number of lattice paths through an n x n grid, i.e. the
    central binomial coefficient C(2n, n)."""
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
UpperCamelCase = int(sys.argv[1])
print(solution(n))
except ValueError:
print("""Invalid entry - please enter a number.""")
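# Quick sanity check (illustrative, not in the original file): a 2 x 2 grid has
# C(4, 2) = 6 lattice paths, so solution(2) == 6.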
| 120
| 1
|
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
"good first issue",
"feature request",
"wip",
]
def main() -> None:
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/accelerate")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and days_since_updated > 7
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Close issue since it has been 7 days of inactivity since bot mention.
            issue.edit(state="closed")
        elif (
            days_since_updated > 23
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Add stale comment
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
if __name__ == "__main__":
main()
| 56
|
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class ImageEncoder(nn.Module):
    """Pools ResNet-152 feature maps into `num_image_embeds` image embeddings."""

    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, x):
        # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048
class JsonlDataset(Dataset):
    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(l) for l in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length

        self.transforms = transforms

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]

        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1

        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)

        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies(self):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs
def collate_fn(batch):
    lens = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)

    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1

    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])

    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
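# Illustrative usage (not part of the original script): wire the dataset and
# collate function into a DataLoader.
#
#     loader = torch.utils.data.DataLoader(dataset, batch_size=8, collate_fn=collate_fn)
#     text, mask, img, img_start, img_end, tgt = next(iter(loader))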
def get_mmimdb_labels():
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms():
return transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46_777_044, 0.44_531_429, 0.40_661_017] , std=[0.12_221_994, 0.12_145_835, 0.14_380_469] , ),
] )
| 579
| 0
|
A__ = {
'''Pillow''': '''Pillow<10.0.0''',
'''accelerate''': '''accelerate>=0.20.3''',
'''av''': '''av==9.2.0''',
'''beautifulsoup4''': '''beautifulsoup4''',
'''black''': '''black~=23.1''',
'''codecarbon''': '''codecarbon==1.2.0''',
'''cookiecutter''': '''cookiecutter==1.7.3''',
'''dataclasses''': '''dataclasses''',
'''datasets''': '''datasets!=2.5.0''',
'''decord''': '''decord==0.6.0''',
'''deepspeed''': '''deepspeed>=0.9.3''',
'''diffusers''': '''diffusers''',
'''dill''': '''dill<0.3.5''',
'''evaluate''': '''evaluate>=0.2.0''',
'''fairscale''': '''fairscale>0.3''',
'''faiss-cpu''': '''faiss-cpu''',
'''fastapi''': '''fastapi''',
'''filelock''': '''filelock''',
'''flax''': '''flax>=0.4.1,<=0.7.0''',
'''ftfy''': '''ftfy''',
'''fugashi''': '''fugashi>=1.0''',
'''GitPython''': '''GitPython<3.1.19''',
'''hf-doc-builder''': '''hf-doc-builder>=0.3.0''',
'''huggingface-hub''': '''huggingface-hub>=0.14.1,<1.0''',
'''importlib_metadata''': '''importlib_metadata''',
'''ipadic''': '''ipadic>=1.0.0,<2.0''',
'''isort''': '''isort>=5.5.4''',
'''jax''': '''jax>=0.2.8,!=0.3.2,<=0.4.13''',
'''jaxlib''': '''jaxlib>=0.1.65,<=0.4.13''',
'''jieba''': '''jieba''',
'''kenlm''': '''kenlm''',
'''keras-nlp''': '''keras-nlp>=0.3.1''',
'''librosa''': '''librosa''',
'''nltk''': '''nltk''',
'''natten''': '''natten>=0.14.6''',
'''numpy''': '''numpy>=1.17''',
'''onnxconverter-common''': '''onnxconverter-common''',
'''onnxruntime-tools''': '''onnxruntime-tools>=1.4.2''',
'''onnxruntime''': '''onnxruntime>=1.4.0''',
'''opencv-python''': '''opencv-python''',
'''optuna''': '''optuna''',
'''optax''': '''optax>=0.0.8,<=0.1.4''',
'''packaging''': '''packaging>=20.0''',
'''parameterized''': '''parameterized''',
'''phonemizer''': '''phonemizer''',
'''protobuf''': '''protobuf''',
'''psutil''': '''psutil''',
'''pyyaml''': '''pyyaml>=5.1''',
'''pydantic''': '''pydantic<2''',
'''pytest''': '''pytest>=7.2.0''',
'''pytest-timeout''': '''pytest-timeout''',
'''pytest-xdist''': '''pytest-xdist''',
'''python''': '''python>=3.8.0''',
'''ray[tune]''': '''ray[tune]''',
'''regex''': '''regex!=2019.12.17''',
'''requests''': '''requests''',
'''rhoknp''': '''rhoknp>=1.1.0,<1.3.1''',
'''rjieba''': '''rjieba''',
'''rouge-score''': '''rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1''',
'''ruff''': '''ruff>=0.0.241,<=0.0.259''',
'''sacrebleu''': '''sacrebleu>=1.4.12,<2.0.0''',
'''sacremoses''': '''sacremoses''',
'''safetensors''': '''safetensors>=0.3.1''',
'''sagemaker''': '''sagemaker>=2.31.0''',
'''scikit-learn''': '''scikit-learn''',
'''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''',
'''sigopt''': '''sigopt''',
'''starlette''': '''starlette''',
'''sudachipy''': '''sudachipy>=0.6.6''',
'''sudachidict_core''': '''sudachidict_core>=20220729''',
'''tensorflow-cpu''': '''tensorflow-cpu>=2.6,<2.14''',
'''tensorflow''': '''tensorflow>=2.6,<2.14''',
'''tensorflow-text''': '''tensorflow-text<2.14''',
'''tf2onnx''': '''tf2onnx''',
'''timeout-decorator''': '''timeout-decorator''',
'''timm''': '''timm''',
'''tokenizers''': '''tokenizers>=0.11.1,!=0.11.3,<0.14''',
'''torch''': '''torch>=1.9,!=1.12.0''',
'''torchaudio''': '''torchaudio''',
'''torchvision''': '''torchvision''',
'''pyctcdecode''': '''pyctcdecode>=0.4.0''',
'''tqdm''': '''tqdm>=4.27''',
'''unidic''': '''unidic>=1.0.2''',
'''unidic_lite''': '''unidic_lite>=1.0.7''',
'''urllib3''': '''urllib3<2.0.0''',
'''uvicorn''': '''uvicorn''',
}
| 219
|
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch

    _torch_available = True
except ImportError:
    _torch_available = False


try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )

default_cache_path = os.path.join(torch_cache_home, "transformers")

CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
CONFIG = os.path.join(PATH, "config.yaml")
ATTRIBUTES = os.path.join(PATH, "attributes.txt")
OBJECTS = os.path.join(PATH, "objects.txt")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
CONFIG_NAME = "config.yaml"
def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    vg_classes = []
    with open(objs) as f:
        for object in f.readlines():
            vg_classes.append(object.split(",")[0].lower().strip())

    vg_attrs = []
    with open(attrs) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(",")[0].lower().strip())
    return vg_classes, vg_attrs


def load_checkpoint(ckp_path):
    r = OrderedDict()
    with open(ckp_path, "rb") as f:
        ckp = pkl.load(f)["model"]
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            assert isinstance(v, torch.Tensor), type(v)
        r[k] = v
    return r
class Config:
    _pointer = {}

    def __init__(self, dictionary: dict, name: str = "root", level=0):
        self._name = name
        self._level = level
        d = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            k = copy.deepcopy(k)
            v = copy.deepcopy(v)
            if isinstance(v, dict):
                v = Config(v, name=k, level=level + 1)
            d[k] = v
            setattr(self, k, v)

        self._pointer = d

    def __repr__(self):
        return str(list((self._pointer.keys())))

    def __setattr__(self, key, val):
        self.__dict__[key] = val
        self.__dict__[key.upper()] = val
        levels = key.split(".")
        last_level = len(levels) - 1
        pointer = self._pointer
        if len(levels) > 1:
            for i, l in enumerate(levels):
                if hasattr(self, l) and isinstance(getattr(self, l), Config):
                    setattr(getattr(self, l), ".".join(levels[i:]), val)
                if l == last_level:
                    pointer[l] = val
                else:
                    pointer = pointer[l]

    def to_dict(self):
        return self._pointer

    def dump_yaml(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            dump(data, stream)

    def dump_json(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            json.dump(data, stream)

    @staticmethod
    def load_yaml(config):
        with open(config) as stream:
            data = load(stream, Loader=Loader)
        return data

    def __str__(self):
        t = "    "
        if self._name != "root":
            r = f"{t * (self._level-1)}{self._name}:\n"
        else:
            r = ""
        level = self._level
        for i, (k, v) in enumerate(self._pointer.items()):
            if isinstance(v, Config):
                r += f"{t * (self._level)}{v}\n"
                self._level += 1
            else:
                r += f"{t * (self._level)}{k}: {v} ({type(v).__name__})\n"
        self._level = level
        return r[:-1]

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: str, **kwargs):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        return cls(config_dict)

    @classmethod
    def get_config_dict(cls, pretrained_model_name_or_path: str, **kwargs):
        cache_dir = kwargs.pop("cache_dir", None)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", False)

        if os.path.isdir(pretrained_model_name_or_path):
            config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
        elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
            config_file = pretrained_model_name_or_path
        else:
            config_file = hf_bucket_url(pretrained_model_name_or_path, filename=CONFIG_NAME, use_cdn=False)

        try:
            # Load from URL or cache if already cached
            resolved_config_file = cached_path(
                config_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
            )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError

            config_file = Config.load_yaml(resolved_config_file)

        except EnvironmentError:
            msg = "Can't load config for"
            raise EnvironmentError(msg)

        if resolved_config_file == config_file:
            print("loading configuration file from path")
        else:
            print("loading configuration file cache")

        return Config.load_yaml(resolved_config_file), kwargs
def compare(in_tensor):
    out_tensor = torch.load("dump.pt", map_location=in_tensor.device)
    n1 = in_tensor.numpy()
    n2 = out_tensor.numpy()[0]
    print(n1.shape, n1[0, 0, :5])
    print(n2.shape, n2[0, 0, :5])
    assert np.allclose(n1, n2, rtol=0.01, atol=0.1), (
        f"{sum([1 for x in np.isclose(n1, n2, rtol=0.01, atol=0.1).flatten() if x is False]) / len(n1.flatten()) * 100:.4f} %"
        " element-wise mismatch"
    )
    raise Exception("tensors are all good")


# Hugging face functions below


def is_remote_url(url_or_filename):
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")


def hf_bucket_url(model_id: str, filename: str, use_cdn=True) -> str:
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f"{endpoint}/{model_id}-{filename}"
    else:
        return f"{endpoint}/{model_id}/{filename}"


def http_get(url, temp_file, proxies=None, resume_size=0, user_agent=None):
    ua = "python/{}".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(unit="B", unit_scale=True, total=total, initial=resume_size, desc="Downloading")
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def get_from_cache(
    url,
    cache_dir=None,
    force_download=False,
    proxies=None,
    etag_timeout=10,
    resume_download=False,
    user_agent=None,
    local_files_only=False,
):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    os.makedirs(cache_dir, exist_ok=True)

    etag = None
    if not local_files_only:
        try:
            response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
            if response.status_code == 200:
                etag = response.headers.get("ETag")
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass

    filename = url_to_filename(url, etag)

    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)

    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir), filename + ".*")
                if not file.endswith(".json") and not file.endswith(".lock")
            ]
            if len(matching_files) > 0:
                return os.path.join(cache_dir, matching_files[-1])
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        "Cannot find the requested files in the cached path and outgoing traffic has been"
                        " disabled. To enable model look-ups and downloads online, set 'local_files_only'"
                        " to False."
                    )
                return None

    # From now on, etag is not None.
    if os.path.exists(cache_path) and not force_download:
        return cache_path

    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path

        if resume_download:
            incomplete_path = cache_path + ".incomplete"

            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path, "a+b") as f:
                    yield f

            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
            resume_size = 0

        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(
                "%s not found in cache or force_download set to True, downloading to %s",
                url,
                temp_file.name,
            )

            http_get(
                url,
                temp_file,
                proxies=proxies,
                resume_size=resume_size,
                user_agent=user_agent,
            )

        os.replace(temp_file.name, cache_path)

        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path, "w") as meta_file:
            json.dump(meta, meta_file)

    return cache_path
def url_to_filename(url, etag=None):
    url_bytes = url.encode("utf-8")
    url_hash = sha256(url_bytes)
    filename = url_hash.hexdigest()

    if etag:
        etag_bytes = etag.encode("utf-8")
        etag_hash = sha256(etag_bytes)
        filename += "." + etag_hash.hexdigest()

    if url.endswith(".h5"):
        filename += ".h5"

    return filename


def cached_path(
    url_or_filename,
    cache_dir=None,
    force_download=False,
    proxies=None,
    resume_download=False,
    user_agent=None,
    extract_compressed_file=False,
    force_extract=False,
    local_files_only=False,
):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    if is_remote_url(url_or_filename):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename,
            cache_dir=cache_dir,
            force_download=force_download,
            proxies=proxies,
            resume_download=resume_download,
            user_agent=user_agent,
            local_files_only=local_files_only,
        )
    elif os.path.exists(url_or_filename):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("file {} not found".format(url_or_filename))
    else:
        # Something unknown
        raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))

    if extract_compressed_file:
        if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):
            return output_path

        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir, output_file = os.path.split(output_path)
        output_extract_dir_name = output_file.replace(".", "-") + "-extracted"
        output_path_extracted = os.path.join(output_dir, output_extract_dir_name)

        if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:
            return output_path_extracted

        # Prevent parallel extractions
        lock_path = output_path + ".lock"
        with FileLock(lock_path):
            shutil.rmtree(output_path_extracted, ignore_errors=True)
            os.makedirs(output_path_extracted)
            if is_zipfile(output_path):
                with ZipFile(output_path, "r") as zip_file:
                    zip_file.extractall(output_path_extracted)
                    zip_file.close()
            elif tarfile.is_tarfile(output_path):
                tar_file = tarfile.open(output_path)
                tar_file.extractall(output_path_extracted)
                tar_file.close()
            else:
                raise EnvironmentError("Archive format of {} could not be identified".format(output_path))

        return output_path_extracted

    return output_path


def get_data(query, delim=","):
    assert isinstance(query, str)
    if os.path.isfile(query):
        with open(query) as f:
            data = eval(f.read())
    else:
        req = requests.get(query)
        try:
            data = req.json()
        except Exception:
            data = req.content.decode()
            assert data is not None, "could not connect"
            try:
                data = eval(data)
            except Exception:
                data = data.split("\n")
        req.close()
    return data


def get_image_from_url(url):
    response = requests.get(url)
    img = np.array(Image.open(BytesIO(response.content)))
    return img


def load_frcnn_pkl_from_url(url):
    fn = url.split("/")[-1]
    if fn not in os.listdir(os.getcwd()):
        wget.download(url)
    with open(fn, "rb") as stream:
        weights = pkl.load(stream)
    model = weights.pop("model")
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v)
        if "running_var" in k:
            zero = torch.tensor([0])
            k2 = k.replace("running_var", "num_batches_tracked")
            new[k2] = zero
    return new


def get_demo_path():
    print(f"{os.path.abspath(os.path.join(PATH, os.pardir))}/demo.ipynb")


def img_tensorize(im, input_format="RGB"):
    assert isinstance(im, str)
    if os.path.isfile(im):
        img = cv2.imread(im)
    else:
        img = get_image_from_url(im)
        assert img is not None, f"could not connect to: {im}"
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img


def chunk(images, batch=1):
    return (images[i : i + batch] for i in range(0, len(images), batch))
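# Example (illustrative): chunk([img1, img2, img3], batch=2) yields
# [img1, img2] and then [img3].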
| 219
| 1
|
'''simple docstring'''
def is_isogram(string: str) -> bool:
    """Return True if no letter occurs more than once in the given string."""
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")

    letters = sorted(string.lower())
    return len(letters) == len(set(letters))


if __name__ == "__main__":
    input_str = input("Enter a string ").strip()

    isogram = is_isogram(input_str)
    print(f"{input_str} is {'an' if isogram else 'not an'} isogram.")
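# Illustrative examples (not in the original file):
# >>> is_isogram("Uncopyrightable")
# True
# >>> is_isogram("allowance")
# False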
| 109
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
a = logging.get_logger(__name__)
def squared_euclidean_distance(a, b):
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d


def color_quantize(x, clusters):
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
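# Small worked example (illustrative): with clusters [[0, 0, 0], [255, 255, 255]],
# color_quantize maps each RGB pixel to its nearest cluster index, e.g.
# [10, 12, 8] -> 0 (dark) and [240, 250, 255] -> 1 (bright).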
class ImageGPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_normalize: bool = True,
        do_color_quantize: bool = True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def normalize(
        self,
        image: np.ndarray,
        data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        # Rescale pixel values from [0, 255] to [-1, 1].
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_normalize: bool = None,
        do_color_quantize: Optional[bool] = None,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])

            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)

            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
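# Minimal usage sketch (illustrative; the checkpoint name is an assumption):
#
#     processor = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")
#     encoding = processor(images=image, return_tensors="pt")
#     encoding["input_ids"]  # per-pixel color-cluster ids, shape (1, height * width)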
| 109
| 1
|
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
@require_flax
class __lowercase ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self) -> None:
        self.model_tester = FlaxPegasusModelTester(self)
        # assumption: PegasusConfig is the config class under test
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self) -> None:
        self.config_tester.run_common_tests()

    def test_use_cache_forward(self) -> None:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self) -> None:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode_jit(self) -> None:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest('JIT Enabled'):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest('JIT Disabled'):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode_jit(self) -> None:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict['input_ids'], inputs_dict['attention_mask'])

                prepared_inputs_dict = {
                    'decoder_input_ids': inputs_dict['decoder_input_ids'],
                    'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
                    'encoder_outputs': encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest('JIT Enabled'):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest('JIT Disabled'):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self) -> None:
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('google/pegasus-large', from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @slow
    def test_pegasus_xsum_summary(self) -> None:
        model = FlaxPegasusForConditionalGeneration.from_pretrained('google/pegasus-xsum')
        tokenizer = PegasusTokenizer.from_pretrained('google/pegasus-xsum')
        src_text = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
        tgt_text = [
'''California\'s largest electricity provider has turned off power to hundreds of thousands of customers.''',
'''Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.''',
]
        inputs = tokenizer(src_text, return_tensors='np', truncation=True, max_length=5_12, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        assert tgt_text == decoded
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"nvidia/segformer-b0-finetuned-ade-512-512": (
"https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    """simple docstring"""

    model_type = '''segformer'''
def __init__( self , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=[2, 2, 2, 2] , __UpperCAmelCase=[8, 4, 2, 1] , __UpperCAmelCase=[32, 64, 1_60, 2_56] , __UpperCAmelCase=[7, 3, 3, 3] , __UpperCAmelCase=[4, 2, 2, 2] , __UpperCAmelCase=[1, 2, 5, 8] , __UpperCAmelCase=[4, 4, 4, 4] , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=0.1 , __UpperCAmelCase=1E-6 , __UpperCAmelCase=2_56 , __UpperCAmelCase=2_55 , **__UpperCAmelCase , ) -> Union[str, Any]:
super().__init__(**__UpperCAmelCase )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
'''Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be'''
''' removed, as the behaviour will default to that of reshape_last_stage = True.''' , __UpperCAmelCase , )
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get('reshape_last_stage', True)  # default True per the deprecation note above
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
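# Illustrative note (hedged): the constructor defaults above correspond to the
# SegFormer-B0 layout, i.e. four encoder blocks with hidden sizes
# [32, 64, 160, 256], depths [2, 2, 2, 2] and stride-4/2/2/2 patch merging.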
class SegformerOnnxConfig(OnnxConfig):
    """simple docstring"""

    torch_onnx_minimum_version = version.parse('''1.11''')
@property
def snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def snake_case ( self ) -> float:
return 1E-4
@property
def snake_case ( self ) -> int:
return 12
'''simple docstring'''
from typing import Any

import numpy as np


def is_hermitian(matrix: np.ndarray) -> bool:
    '''simple docstring'''
    # A matrix is Hermitian when it equals its own conjugate transpose.
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    '''simple docstring'''
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    '''simple docstring'''
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"""{a} is not hermitian."""
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"""{a} is not hermitian."""
    assert rayleigh_quotient(a, v) == float(3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests()
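# Mathematical background for the functions above: for a Hermitian matrix A,
# the Rayleigh quotient R(A, v) = (v* A v) / (v* v) is always real and is
# bounded by the extreme eigenvalues, lambda_min(A) <= R(A, v) <= lambda_max(A),
# with equality exactly at the corresponding eigenvectors.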
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def test_mockfs(mockfs):
    '''simple docstring'''
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry
def test_non_mockfs():
    '''simple docstring'''
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry
def test_extract_path_from_uri():
    '''simple docstring'''
    mock_bucket = "mock-s3-bucket"
    dataset_path = f"s3://{mock_bucket}"
    dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path.startswith("s3://") is False

    new_dataset_path = "./local/path"
    dataset_path = extract_path_from_uri(new_dataset_path)
    assert dataset_path == new_dataset_path
def test_is_remote_filesystem(mockfs):
    '''simple docstring'''
    is_remote = is_remote_filesystem(mockfs)
    assert is_remote is True

    fs = fsspec.filesystem("file")
    is_remote = is_remote_filesystem(fs)
    assert is_remote is False
@pytest.mark.parametrize("""compression_fs_class""" , _A )
def UpperCamelCase__ ( _A: List[str] , _A: Tuple , _A: List[Any] , _A: Any , _A: List[Any] , _A: Optional[int] , _A: List[str] ):
'''simple docstring'''
__lowerCamelCase = {"""gzip""": gz_file, """xz""": xz_file, """zstd""": zstd_file, """bz2""": bza_file, """lz4""": lza_file}
__lowerCamelCase = input_paths[compression_fs_class.protocol]
if input_path is None:
__lowerCamelCase = f'''for \'{compression_fs_class.protocol}\' compression protocol, '''
if compression_fs_class.protocol == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(_A )
__lowerCamelCase = fsspec.filesystem(compression_fs_class.protocol , fo=_A )
assert isinstance(_A , _A )
__lowerCamelCase = os.path.basename(_A )
__lowerCamelCase = expected_filename[: expected_filename.rindex(""".""" )]
assert fs.glob("""*""" ) == [expected_filename]
with fs.open(_A , """r""" , encoding="""utf-8""" ) as f, open(_A , encoding="""utf-8""" ) as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize("""protocol""" , ["""zip""", """gzip"""] )
def UpperCamelCase__ ( _A: Optional[Any] , _A: Union[str, Any] , _A: int ):
'''simple docstring'''
__lowerCamelCase = {"""zip""": zip_jsonl_path, """gzip""": jsonl_gz_path}
__lowerCamelCase = compressed_file_paths[protocol]
__lowerCamelCase = """dataset.jsonl"""
__lowerCamelCase = f'''{protocol}://{member_file_path}::{compressed_file_path}'''
__lowerCamelCase , *__lowerCamelCase = fsspec.get_fs_token_paths(_A )
assert fs.isfile(_A )
assert not fs.isfile("""non_existing_""" + member_file_path )
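# Hedged sketch of fsspec URL chaining as exercised above (file names here are
# illustrative): everything before "::" is a protocol layered on top of the
# file behind it, so "zip://dataset.jsonl::data.zip" means "dataset.jsonl
# inside data.zip".
#
#   fs, *_ = fsspec.get_fs_token_paths("zip://dataset.jsonl::data.zip")
#   fs.isfile("dataset.jsonl")  # -> True when the archive contains that member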
@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
    '''simple docstring'''
    # fixture names above follow the upstream datasets test-suite conventions
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
    assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
    assert hffs.isdir("data")
    assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
    with open(text_file) as f:
        assert hffs.open("data/text_data.txt", "r").read() == f.read()
def test_fs_overwrites():
    '''simple docstring'''
    protocol = "bz2"

    # Import module
    import datasets.filesystems

    # Overwrite protocol and reload
    register_implementation(protocol, None, clobber=True)
    with pytest.warns(UserWarning) as warning_info:
        importlib.reload(datasets.filesystems)

    assert len(warning_info) == 1
    assert (
        str(warning_info[0].message)
        == f"A filesystem protocol was already set for {protocol} and will be overwritten."
    )
'''simple docstring'''
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a 50 times smaller than this see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
mname = 'facebook/wmt19-en-de'

tokenizer = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
config = FSMTConfig.from_pretrained(mname)
config.update(
    dict(
        d_model=4,
        encoder_layers=1,
        decoder_layers=1,
        encoder_ffn_dim=4,
        decoder_ffn_dim=4,
        encoder_attention_heads=1,
        decoder_attention_heads=1,
    )
)

tiny_model = FSMTForConditionalGeneration(config)
print(F"""num of params {tiny_model.num_parameters()}""")

# Test
batch = tokenizer(['Making tiny model'], return_tensors='pt')
outputs = tiny_model(**batch)
print('test output:', len(outputs.logits[0]))

# Save
mname_tiny = 'tiny-wmt19-en-de'
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-de
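# Rough sanity expectation (assumption, not measured here): with d_model=4 and
# a single encoder/decoder layer, nearly all remaining parameters sit in the
# embedding tables, so the fp16 checkpoint saved above stays small, matching
# the "~3MB in total" note at the top of this script.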
'''simple docstring'''
def gnome_sort(lst: list) -> list:
    if len(lst) <= 1:
        return lst

    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            # Swap the out-of-order neighbours and step back one position.
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(gnome_sort(unsorted))
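# Example behaviour of gnome_sort (illustrative):
#   gnome_sort([3, 1, 2])  # -> [1, 2, 3]
# Worst case is O(n^2) comparisons/swaps, but an already-sorted list is
# traversed in O(n), since the index only ever moves forward in that case.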
'''simple docstring'''
from __future__ import annotations
def comp_and_swap(array: list[int], index_a: int, index_b: int, direction: int) -> None:
    # Swap when the pair violates the requested direction (1 = ascending, 0 = descending).
    if (direction == 1 and array[index_a] > array[index_b]) or (
        direction == 0 and array[index_a] < array[index_b]
    ):
        array[index_a], array[index_b] = array[index_b], array[index_a]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    if length > 1:
        middle = int(length / 2)
        # Build an ascending and a descending half, then merge the bitonic sequence.
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)
if __name__ == "__main__":
_UpperCamelCase = input('Enter numbers separated by a comma:\n').strip()
_UpperCamelCase = [int(item.strip()) for item in user_input.split(',')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('\nSorted array in ascending order is: ', end='')
print(*unsorted, sep=', ')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('Sorted array in descending order is: ', end='')
print(*unsorted, sep=', ')
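# Note (property of the classic algorithm, stated here as an assumption about
# this implementation): bitonic sort requires the input length to be a power
# of two. Example:
#   data = [4, 1, 3, 2]
#   bitonic_sort(data, 0, len(data), 1)   # data becomes [1, 2, 3, 4]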
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase_ :
"""simple docstring"""
def __init__( self : str , _a : Dict , _a : List[str]=13 , _a : List[str]=7 , _a : Union[str, Any]=True , _a : List[Any]=True , _a : Optional[Any]=True , _a : Any=True , _a : Optional[Any]=99 , _a : List[str]=32 , _a : Optional[Any]=5 , _a : str=4 , _a : str=37 , _a : List[Any]="gelu" , _a : List[Any]=0.1 , _a : Optional[int]=0.1 , _a : Optional[Any]=128 , _a : Tuple=32 , _a : List[Any]=16 , _a : Optional[int]=2 , _a : List[str]=0.02 , _a : List[str]=3 , _a : Any=4 , _a : List[str]=None , ) -> Any:
__lowerCamelCase : Optional[Any] = parent
__lowerCamelCase : Optional[int] = batch_size
__lowerCamelCase : List[str] = seq_length
__lowerCamelCase : List[str] = is_training
__lowerCamelCase : Dict = use_input_mask
__lowerCamelCase : Optional[int] = use_token_type_ids
__lowerCamelCase : Union[str, Any] = use_labels
__lowerCamelCase : Tuple = vocab_size
__lowerCamelCase : Any = hidden_size
__lowerCamelCase : Tuple = num_hidden_layers
__lowerCamelCase : List[str] = num_attention_heads
__lowerCamelCase : str = intermediate_size
__lowerCamelCase : Tuple = hidden_act
__lowerCamelCase : List[str] = hidden_dropout_prob
__lowerCamelCase : List[Any] = attention_probs_dropout_prob
__lowerCamelCase : List[str] = max_position_embeddings
__lowerCamelCase : str = type_vocab_size
__lowerCamelCase : Optional[Any] = type_sequence_label_size
__lowerCamelCase : Optional[Any] = initializer_range
__lowerCamelCase : Tuple = num_labels
__lowerCamelCase : Tuple = num_choices
__lowerCamelCase : Optional[int] = scope
def _lowercase ( self : Optional[int] ) -> Dict:
__lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase : Optional[int] = None
if self.use_input_mask:
__lowerCamelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCamelCase : Tuple = None
if self.use_token_type_ids:
__lowerCamelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCamelCase : Optional[int] = None
__lowerCamelCase : Union[str, Any] = None
__lowerCamelCase : List[str] = None
if self.use_labels:
__lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCamelCase : Any = ids_tensor([self.batch_size] , self.num_choices )
__lowerCamelCase : List[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowercase ( self : List[str] ) -> int:
return NezhaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_a , initializer_range=self.initializer_range , )
def _lowercase ( self : Tuple ) -> Optional[Any]:
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def _lowercase ( self : Optional[int] , _a : List[Any] , _a : Dict , _a : Union[str, Any] , _a : Tuple , _a : Tuple , _a : Dict , _a : Any ) -> Tuple:
__lowerCamelCase : List[Any] = NezhaModel(config=_a )
model.to(_a )
model.eval()
__lowerCamelCase : Any = model(_a , attention_mask=_a , token_type_ids=_a )
__lowerCamelCase : int = model(_a , token_type_ids=_a )
__lowerCamelCase : Optional[Any] = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _lowercase ( self : Optional[Any] , _a : List[str] , _a : Dict , _a : Optional[Any] , _a : int , _a : List[str] , _a : Optional[int] , _a : List[str] , _a : Optional[int] , _a : Any , ) -> Dict:
__lowerCamelCase : Optional[Any] = True
__lowerCamelCase : Union[str, Any] = NezhaModel(_a )
model.to(_a )
model.eval()
__lowerCamelCase : Any = model(
_a , attention_mask=_a , token_type_ids=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , )
__lowerCamelCase : Tuple = model(
_a , attention_mask=_a , token_type_ids=_a , encoder_hidden_states=_a , )
__lowerCamelCase : str = model(_a , attention_mask=_a , token_type_ids=_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _lowercase ( self : Union[str, Any] , _a : Optional[int] , _a : int , _a : Optional[Any] , _a : Any , _a : Tuple , _a : Optional[int] , _a : int ) -> List[Any]:
__lowerCamelCase : int = NezhaForMaskedLM(config=_a )
model.to(_a )
model.eval()
__lowerCamelCase : Optional[Any] = model(_a , attention_mask=_a , token_type_ids=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase ( self : int , _a : Tuple , _a : List[Any] , _a : Any , _a : Optional[Any] , _a : Dict , _a : Dict , _a : List[Any] ) -> str:
__lowerCamelCase : str = NezhaForNextSentencePrediction(config=_a )
model.to(_a )
model.eval()
__lowerCamelCase : str = model(
_a , attention_mask=_a , token_type_ids=_a , labels=_a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def _lowercase ( self : Any , _a : List[str] , _a : str , _a : List[Any] , _a : str , _a : Union[str, Any] , _a : int , _a : Tuple ) -> Dict:
__lowerCamelCase : List[str] = NezhaForPreTraining(config=_a )
model.to(_a )
model.eval()
__lowerCamelCase : Optional[int] = model(
_a , attention_mask=_a , token_type_ids=_a , labels=_a , next_sentence_label=_a , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def _lowercase ( self : int , _a : Dict , _a : Any , _a : Any , _a : Tuple , _a : List[str] , _a : Any , _a : List[Any] ) -> List[Any]:
__lowerCamelCase : Any = NezhaForQuestionAnswering(config=_a )
model.to(_a )
model.eval()
__lowerCamelCase : List[Any] = model(
_a , attention_mask=_a , token_type_ids=_a , start_positions=_a , end_positions=_a , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowercase ( self : Optional[int] , _a : str , _a : Tuple , _a : List[str] , _a : List[str] , _a : Any , _a : str , _a : Union[str, Any] ) -> int:
__lowerCamelCase : Optional[int] = self.num_labels
__lowerCamelCase : List[str] = NezhaForSequenceClassification(_a )
model.to(_a )
model.eval()
__lowerCamelCase : Any = model(_a , attention_mask=_a , token_type_ids=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase ( self : List[Any] , _a : List[Any] , _a : str , _a : Tuple , _a : Dict , _a : Dict , _a : Union[str, Any] , _a : List[Any] ) -> int:
__lowerCamelCase : int = self.num_labels
__lowerCamelCase : int = NezhaForTokenClassification(config=_a )
model.to(_a )
model.eval()
__lowerCamelCase : List[str] = model(_a , attention_mask=_a , token_type_ids=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowercase ( self : List[str] , _a : Any , _a : List[str] , _a : Tuple , _a : Optional[Any] , _a : Optional[Any] , _a : str , _a : Optional[Any] ) -> int:
__lowerCamelCase : List[Any] = self.num_choices
__lowerCamelCase : Optional[Any] = NezhaForMultipleChoice(config=_a )
model.to(_a )
model.eval()
__lowerCamelCase : Optional[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCamelCase : Any = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCamelCase : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCamelCase : str = model(
_a , attention_mask=_a , token_type_ids=_a , labels=_a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowercase ( self : List[str] ) -> Any:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
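# Illustrative use of the tester plumbing above (hypothetical driver code, not
# part of the original file):
#   tester = NezhaModelTester(parent=self)
#   config, inputs_dict = tester.prepare_config_and_inputs_for_common()
#   NezhaModel(config)(**inputs_dict)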
@require_torch
class lowerCamelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
        (
            NezhaModel,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': NezhaModel,
            'fill-mask': NezhaForMaskedLM,
            'question-answering': NezhaForQuestionAnswering,
            'text-classification': NezhaForSequenceClassification,
            'token-classification': NezhaForTokenClassification,
            'zero-shot': NezhaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
def _lowercase ( self : List[str] , _a : Union[str, Any] , _a : Any , _a : List[Any]=False ) -> Optional[Any]:
__lowerCamelCase : List[str] = super()._prepare_for_class(_a , _a , return_labels=_a )
if return_labels:
if model_class in get_values(_a ):
__lowerCamelCase : Tuple = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_a )
__lowerCamelCase : List[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_a )
return inputs_dict
def _lowercase ( self : Optional[int] ) -> Any:
__lowerCamelCase : Dict = NezhaModelTester(self )
__lowerCamelCase : int = ConfigTester(self , config_class=_a , hidden_size=37 )
def _lowercase ( self : Optional[Any] ) -> List[Any]:
self.config_tester.run_common_tests()
def _lowercase ( self : int ) -> List[str]:
__lowerCamelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def _lowercase ( self : Optional[Any] ) -> Union[str, Any]:
__lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*_a )
    def _lowercase ( self : str ) -> Any:
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        # Exercise the decoder path with a default (absent) input mask.
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
def _lowercase ( self : int ) -> Tuple:
__lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_a )
def _lowercase ( self : str ) -> str:
__lowerCamelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_a )
def _lowercase ( self : List[str] ) -> int:
__lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_next_sequence_prediction(*_a )
def _lowercase ( self : Any ) -> Any:
__lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_a )
def _lowercase ( self : Union[str, Any] ) -> Optional[int]:
__lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_a )
def _lowercase ( self : Optional[int] ) -> str:
__lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_a )
def _lowercase ( self : Dict ) -> Any:
__lowerCamelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_a )
@slow
def _lowercase ( self : Optional[Any] ) -> Union[str, Any]:
for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase : List[str] = NezhaModel.from_pretrained(_a )
self.assertIsNotNone(_a )
@slow
@require_torch_gpu
def _lowercase ( self : List[Any] ) -> str:
__lowerCamelCase ,__lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# NezhaForMultipleChoice behaves incorrectly in JIT environments.
if model_class == NezhaForMultipleChoice:
return
__lowerCamelCase : List[str] = True
__lowerCamelCase : Dict = model_class(config=_a )
__lowerCamelCase : List[str] = self._prepare_for_class(_a , _a )
__lowerCamelCase : Dict = torch.jit.trace(
_a , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(_a , os.path.join(_a , 'bert.pt' ) )
__lowerCamelCase : int = torch.jit.load(os.path.join(_a , 'bert.pt' ) , map_location=_a )
loaded(inputs_dict['input_ids'].to(_a ) , inputs_dict['attention_mask'].to(_a ) )
@require_torch
class lowerCamelCase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _lowercase ( self : Dict ) -> Optional[int]:
__lowerCamelCase : Dict = NezhaModel.from_pretrained('sijunhe/nezha-cn-base' )
__lowerCamelCase : int = torch.tensor([[0, 1, 2, 3, 4, 5]] )
__lowerCamelCase : Dict = torch.tensor([[0, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__lowerCamelCase : Tuple = model(_a , attention_mask=_a )[0]
__lowerCamelCase : Optional[int] = torch.Size((1, 6, 768) )
self.assertEqual(output.shape , _a )
__lowerCamelCase : Union[str, Any] = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _a , atol=1e-4 ) )
@slow
def _lowercase ( self : Dict ) -> Dict:
__lowerCamelCase : Dict = NezhaForMaskedLM.from_pretrained('sijunhe/nezha-cn-base' )
__lowerCamelCase : Optional[Any] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
__lowerCamelCase : List[str] = torch.tensor([[1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__lowerCamelCase : Union[str, Any] = model(_a , attention_mask=_a )[0]
__lowerCamelCase : Optional[Any] = torch.Size((1, 6, 2_1128) )
self.assertEqual(output.shape , _a )
__lowerCamelCase : Optional[Any] = torch.tensor(
[[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _a , atol=1e-4 ) )
'''simple docstring'''
from typing import Any
def SCREAMING_SNAKE_CASE__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ):
_validation(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , )
# Creates data structures and fill initial step
lowerCAmelCase_ : dict ={}
lowerCAmelCase_ : dict ={}
for state in states_space:
lowerCAmelCase_ : Optional[int] =observations_space[0]
lowerCAmelCase_ : Optional[Any] =(
initial_probabilities[state] * emission_probabilities[state][observation]
)
lowerCAmelCase_ : str =None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
for o in range(1 , len(_SCREAMING_SNAKE_CASE ) ):
lowerCAmelCase_ : List[Any] =observations_space[o]
lowerCAmelCase_ : Optional[Any] =observations_space[o - 1]
for state in states_space:
# Calculates the argmax for probability function
lowerCAmelCase_ : List[Any] =''''''
lowerCAmelCase_ : int =-1
for k_state in states_space:
lowerCAmelCase_ : Dict =(
probabilities[(k_state, prior_observation)]
* transition_probabilities[k_state][state]
* emission_probabilities[state][observation]
)
if probability > max_probability:
lowerCAmelCase_ : List[str] =probability
lowerCAmelCase_ : List[Any] =k_state
# Update probabilities and pointers dicts
lowerCAmelCase_ : Any =(
probabilities[(arg_max, prior_observation)]
* transition_probabilities[arg_max][state]
* emission_probabilities[state][observation]
)
lowerCAmelCase_ : Optional[Any] =arg_max
# The final observation
lowerCAmelCase_ : str =observations_space[len(_SCREAMING_SNAKE_CASE ) - 1]
# argmax for given final observation
lowerCAmelCase_ : Optional[Any] =''''''
lowerCAmelCase_ : int =-1
for k_state in states_space:
lowerCAmelCase_ : List[Any] =probabilities[(k_state, final_observation)]
if probability > max_probability:
lowerCAmelCase_ : Any =probability
lowerCAmelCase_ : Tuple =k_state
    last_state = arg_max
    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result
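# Hypothetical toy usage of the Viterbi decoder above (the classic
# healthy/fever HMM; the name `viterbi` and the probability tables are
# illustrative, not from this file):
#   observations = ["normal", "cold", "dizzy"]
#   states = ["Healthy", "Fever"]
#   viterbi(observations, states, initial_p, transition_p, emission_p)
#   # -> ["Healthy", "Healthy", "Fever"] for the textbook probability tables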
def SCREAMING_SNAKE_CASE__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ):
_validate_not_empty(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , )
_validate_lists(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_validate_dicts(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ):
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
] ):
raise ValueError('''There\'s an empty parameter''' )
def SCREAMING_SNAKE_CASE__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_validate_list(_SCREAMING_SNAKE_CASE , '''observations_space''' )
_validate_list(_SCREAMING_SNAKE_CASE , '''states_space''' )
def SCREAMING_SNAKE_CASE__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if not isinstance(_object , _SCREAMING_SNAKE_CASE ):
lowerCAmelCase_ : int =f'{var_name} must be a list'
raise ValueError(_SCREAMING_SNAKE_CASE )
else:
for x in _object:
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
lowerCAmelCase_ : Optional[Any] =f'{var_name} must be a list of strings'
raise ValueError(_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ):
_validate_dict(_SCREAMING_SNAKE_CASE , '''initial_probabilities''' , _SCREAMING_SNAKE_CASE )
_validate_nested_dict(_SCREAMING_SNAKE_CASE , '''transition_probabilities''' )
_validate_nested_dict(_SCREAMING_SNAKE_CASE , '''emission_probabilities''' )
def SCREAMING_SNAKE_CASE__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_validate_dict(_object , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for x in _object.values():
_validate_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = False ):
if not isinstance(_object , _SCREAMING_SNAKE_CASE ):
lowerCAmelCase_ : Tuple =f'{var_name} must be a dict'
raise ValueError(_SCREAMING_SNAKE_CASE )
if not all(isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for x in _object ):
lowerCAmelCase_ : List[str] =f'{var_name} all keys must be strings'
raise ValueError(_SCREAMING_SNAKE_CASE )
if not all(isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for x in _object.values() ):
lowerCAmelCase_ : str ='''nested dictionary ''' if nested else ''''''
lowerCAmelCase_ : Tuple =f'{var_name} {nested_text}all values must be {value_type.__name__}'
raise ValueError(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
from doctest import testmod
testmod()
'''simple docstring'''
from pathlib import Path
import cv2 as cva  # OpenCV; the rest of the file uses the cva alias
import numpy as np
from matplotlib import pyplot as plt
def get_rotation(img: np.ndarray, pts_from: np.ndarray, pts_to: np.ndarray, rows: int, cols: int) -> np.ndarray:
    rotation_matrix = cva.getAffineTransform(pts_from, pts_to)
    return cva.warpAffine(img, rotation_matrix, (rows, cols))
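# Background note: cva.getAffineTransform solves for the 2x3 matrix that maps
# the three source points onto the three destination points exactly; three
# point correspondences fully determine an affine transform in the plane.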
if __name__ == "__main__":
    # read original image
    image = cva.imread(
        str(Path(__file__).resolve().parent.parent / '''image_data''' / '''lena.jpg''')
    )
    # turn image in gray scale value
    gray_img = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape

    # set different points to rotate image
    # NOTE: the source bound all four point sets to a single name; which pair
    # feeds each rotation below is ambiguous, so the shared name is kept.
    ptsa = np.array([[50, 50], [2_00, 50], [50, 2_00]], np.float32)
    ptsa = np.array([[10, 1_00], [2_00, 50], [1_00, 2_50]], np.float32)
    ptsa = np.array([[50, 50], [1_50, 50], [1_20, 2_00]], np.float32)
    ptsa = np.array([[10, 1_00], [80, 50], [1_80, 2_50]], np.float32)

    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
        get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
        get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
    ]

    # plot different image rotations
    fig = plt.figure(1)
    titles = ['''Original''', '''Rotation 1''', '''Rotation 2''', '''Rotation 3''']
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, '''gray''')
plt.title(titles[i])
plt.axis('''off''')
plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show()
"""simple docstring"""
import re
import string
import numpy as np
import datasets
_DESCRIPTION = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions: List of predicted texts.\n    references: List of reference texts.\n    regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n        ignore when calculating the exact matches. Note: these regexes are removed\n        from the input data before the changes based on the options below (e.g. ignore_case,\n        ignore_punctuation, ignore_numbers) are applied.\n    ignore_case: Boolean, defaults to False. If true, turns everything\n        to lowercase so that capitalization differences are ignored.\n    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n        comparing predictions and references.\n    ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n        comparing predictions and references.\nReturns:\n    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    25.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    50.0\n\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    75.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n    >>> print(round(results["exact_match"], 1))\n    100.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n    >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    33.3\n\n'
_CITATION = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ExactMatch(datasets.Metric):
"""simple docstring"""
    def _info(self) -> datasets.MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('string', id='sequence'),
                    'references': datasets.Value('string', id='sequence'),
                }
            ),
            reference_urls=[],
        )
    def _compute(self, predictions, references, regexes_to_ignore=None, ignore_case=False, ignore_punctuation=False, ignore_numbers=False):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, '', x) for x in predictions])
                references = np.array([re.sub(s, '', x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans('', '', string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans('', '', string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {"exact_match": np.mean(score_list) * 100}
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)

# Mapping from ONNX tensor type strings to numpy dtypes.
ORT_TO_NP_TYPE = {
    'tensor(bool)': np.bool_,
    'tensor(int8)': np.int8,
    'tensor(uint8)': np.uint8,
    'tensor(int16)': np.int16,
    'tensor(uint16)': np.uint16,
    'tensor(int32)': np.int32,
    'tensor(uint32)': np.uint32,
    'tensor(int64)': np.int64,
    'tensor(uint64)': np.uint64,
    'tensor(float16)': np.float16,
    'tensor(float)': np.float32,
    'tensor(double)': np.float64,
}
class OnnxRuntimeModel:
    '''simple docstring'''

    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        # None requests all session outputs.
        return self.model.run(None, inputs)
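    # Hedged usage sketch (repo id and input name are hypothetical):
    #   model = OnnxRuntimeModel.from_pretrained('some/onnx-repo')
    #   out = model(sample=np.zeros((1, 4, 64, 64), dtype=np.float32))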
    @staticmethod
    def load_model(path, provider=None, sess_options=None):
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"

        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)
    def _save_pretrained(self, save_directory, file_name=None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME

        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass
    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            logger.error(F"Provided path ({save_directory}) should be a directory, not a file")
            return

        os.makedirs(save_directory, exist_ok=True)

        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)
    @classmethod
    def _from_pretrained(cls, model_id, use_auth_token=None, revision=None, force_download=False, cache_dir=None, file_name=None, provider=None, sess_options=None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id,
                filename=model_file_name,
                use_auth_token=use_auth_token,
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)

        return cls(model=model, **kwargs)
    @classmethod
    def from_pretrained(cls, model_id, force_download=True, use_auth_token=None, cache_dir=None, **model_kwargs):
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")

        return cls._from_pretrained(
            model_id=model_id,
            revision=revision,
            cache_dir=cache_dir,
            force_download=force_download,
            use_auth_token=use_auth_token,
            **model_kwargs,
        )
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    """Shannon entropy of softmax(x), computed row-wise in closed form."""
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
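# Why the closed form above equals the softmax entropy: with Z = sum_i exp(x_i)
# and p_i = exp(x_i) / Z,
#   H = -sum_i p_i * log(p_i) = -sum_i p_i * (x_i - log Z)
#     = log(Z) - (sum_i x_i * exp(x_i)) / Z,
# which is exactly torch.log(A) - B / A.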
class DeeBertEncoder(nn.Module):
'''simple docstring'''
    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]
    def set_early_exit_entropy(self, x):
        if (type(x) is float) or (type(x) is int):
            for i in range(len(self.early_exit_entropy)):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x
    def init_highway_pooler(self, pooler):
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name])
def __lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , ):
SCREAMING_SNAKE_CASE_ : Tuple =()
SCREAMING_SNAKE_CASE_ : List[Any] =()
SCREAMING_SNAKE_CASE_ : List[Any] =()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
SCREAMING_SNAKE_CASE_ : Optional[Any] =all_hidden_states + (hidden_states,)
SCREAMING_SNAKE_CASE_ : Dict =layer_module(
__UpperCAmelCase , __UpperCAmelCase , head_mask[i] , __UpperCAmelCase , __UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Dict =layer_outputs[0]
if self.output_attentions:
SCREAMING_SNAKE_CASE_ : str =all_attentions + (layer_outputs[1],)
SCREAMING_SNAKE_CASE_ : Union[str, Any] =(hidden_states,)
if self.output_hidden_states:
SCREAMING_SNAKE_CASE_ : Union[str, Any] =current_outputs + (all_hidden_states,)
if self.output_attentions:
SCREAMING_SNAKE_CASE_ : Dict =current_outputs + (all_attentions,)
SCREAMING_SNAKE_CASE_ : Tuple =self.highway[i](__UpperCAmelCase )
# logits, pooled_output
if not self.training:
SCREAMING_SNAKE_CASE_ : List[str] =highway_exit[0]
SCREAMING_SNAKE_CASE_ : Tuple =entropy(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : List[str] =highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
SCREAMING_SNAKE_CASE_ : List[str] =all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
SCREAMING_SNAKE_CASE_ : List[str] =(highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(__UpperCAmelCase , i + 1 )
else:
SCREAMING_SNAKE_CASE_ : Optional[int] =all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
SCREAMING_SNAKE_CASE_ : List[Any] =all_hidden_states + (hidden_states,)
SCREAMING_SNAKE_CASE_ : List[str] =(hidden_states,)
if self.output_hidden_states:
SCREAMING_SNAKE_CASE_ : Optional[Any] =outputs + (all_hidden_states,)
if self.output_attentions:
SCREAMING_SNAKE_CASE_ : List[str] =outputs + (all_attentions,)
SCREAMING_SNAKE_CASE_ : Any =outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    'The Bert Model transformer with early exiting (DeeBERT). ',
    BERT_START_DOCSTRING,
)
class DeeBertModel(BertPreTrainedModel):
'''simple docstring'''
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)

        self.init_weights()
def __lowerCamelCase ( self ):
self.encoder.init_highway_pooler(self.pooler )
def __lowerCamelCase ( self ):
return self.embeddings.word_embeddings
def __lowerCamelCase ( self , __UpperCAmelCase ):
SCREAMING_SNAKE_CASE_ : Tuple =value
def __lowerCamelCase ( self , __UpperCAmelCase ):
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(__UpperCAmelCase )
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
def __lowerCamelCase ( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , ):
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' )
elif input_ids is not None:
SCREAMING_SNAKE_CASE_ : Optional[Any] =input_ids.size()
elif inputs_embeds is not None:
SCREAMING_SNAKE_CASE_ : int =inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds' )
SCREAMING_SNAKE_CASE_ : Optional[int] =input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
SCREAMING_SNAKE_CASE_ : Optional[int] =torch.ones(__UpperCAmelCase , device=__UpperCAmelCase )
if encoder_attention_mask is None:
SCREAMING_SNAKE_CASE_ : List[str] =torch.ones(__UpperCAmelCase , device=__UpperCAmelCase )
if token_type_ids is None:
SCREAMING_SNAKE_CASE_ : Tuple =torch.zeros(__UpperCAmelCase , dtype=torch.long , device=__UpperCAmelCase )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
SCREAMING_SNAKE_CASE_ : torch.Tensor =self.get_extended_attention_mask(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
SCREAMING_SNAKE_CASE_ : Union[str, Any] =encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
SCREAMING_SNAKE_CASE_ : int =encoder_attention_mask[:, None, None, :]
SCREAMING_SNAKE_CASE_ : List[Any] =encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
SCREAMING_SNAKE_CASE_ : Tuple =(1.0 - encoder_extended_attention_mask) * -10_000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
SCREAMING_SNAKE_CASE_ : List[str] =self.get_head_mask(__UpperCAmelCase , self.config.num_hidden_layers )
SCREAMING_SNAKE_CASE_ : List[str] =self.embeddings(
input_ids=__UpperCAmelCase , position_ids=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , inputs_embeds=__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] =self.encoder(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , head_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , )
SCREAMING_SNAKE_CASE_ : Dict =encoder_outputs[0]
SCREAMING_SNAKE_CASE_ : Dict =self.pooler(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] =(
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
    def __init__(self, message, exit_layer):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!


class BertHighway(nn.Module):
    """A module to provide a shortcut from (the output of one non-final BertLayer in BertEncoder)
    to (cross-entropy computation in BertForSequenceClassification).
    """

    def __init__(self, config):
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_outputs):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output

        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output

        # Dropout and classification
        pooled_output = bmodel_output[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        return logits, pooled_output


@add_start_docstrings(
    """Bert Model (with early exiting - DeeBERT) with a classifier on top,
    also takes care of multi-layer training. """,
    BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
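# Illustrative sketch (not part of the original file): the entropy score that
# drives the early-exit rule above, on peaked vs. flat logits. The math mirrors
# the model's `entropy` helper: log(sum(exp(x))) - sum(x * exp(x)) / sum(exp(x)).
def _entropy_demo():
    peaked = torch.tensor([[8.0, -4.0]])  # confident prediction -> entropy near 0
    flat = torch.tensor([[0.1, 0.0]])  # uncertain prediction -> entropy near ln(2)
    for logits in (peaked, flat):
        exp_x = torch.exp(logits)
        A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
        B = torch.sum(logits * exp_x, dim=1)  # sum of x_i * exp(x_i)
        score = torch.log(A) - B / A
        print(score.item())  # a layer exits early only when score < its threshold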
| 709
|
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {'configuration_dpt': ['DPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DPTConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_dpt'] = ['DPTFeatureExtractor']
    _import_structure['image_processing_dpt'] = ['DPTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_dpt'] = [
        'DPT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'DPTForDepthEstimation',
        'DPTForSemanticSegmentation',
        'DPTModel',
        'DPTPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
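# Minimal sketch (illustrative, not the real transformers._LazyModule) of the
# lazy-import pattern above: the module object is swapped in sys.modules, and
# attribute access triggers the actual submodule import on first use.
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_submodule = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        # only called when normal lookup fails, so cached attributes skip this
        if attr not in self._attr_to_submodule:
            raise AttributeError(f"module {self.__name__} has no attribute {attr}")
        submodule = importlib.import_module("." + self._attr_to_submodule[attr], self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value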
| 153
| 0
|
from __future__ import annotations
def print_distance(distance: list[float], src):
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int) -> bool:
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    """Returns the shortest paths from a vertex src to all other vertices."""
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0

    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])

            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")

    return distance
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())

    graph: list[dict[str, int]] = [{} for _ in range(E)]

    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x)
            for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}

    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
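    # Illustrative edge list (not part of the original prompts): the format
    # bellman_ford expects, for a fixed 4-vertex graph:
    #   example = [
    #       {"src": 0, "dst": 1, "weight": 4},
    #       {"src": 0, "dst": 2, "weight": 1},
    #       {"src": 2, "dst": 1, "weight": 2},
    #       {"src": 1, "dst": 3, "weight": 1},
    #   ]
    #   bellman_ford(example, 4, 4, 0) == [0.0, 3.0, 1.0, 4.0]
    # (0 -> 2 costs 1, 0 -> 2 -> 1 costs 3 which beats the direct edge of 4.)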
| 271
|
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname):
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]
class PetsDataset(Dataset):
    def __init__(self, file_names, image_transform=None, label_to_id=None):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert("RGB")
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def training_function(config, args):
'''simple docstring'''
# Initialize accelerator
if args.with_tracking:
_UpperCamelCase: Tuple = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='''all''' , project_dir=args.project_dir )
else:
_UpperCamelCase: Dict = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_UpperCamelCase: List[str] = config['''lr''']
_UpperCamelCase: int = int(config['''num_epochs'''] )
_UpperCamelCase: Optional[Any] = int(config['''seed'''] )
_UpperCamelCase: Optional[Any] = int(config['''batch_size'''] )
_UpperCamelCase: List[str] = config['''image_size''']
if not isinstance(lowercase , (list, tuple) ):
_UpperCamelCase: Optional[int] = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps , '''isdigit''' ):
if args.checkpointing_steps == "epoch":
_UpperCamelCase: Any = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
_UpperCamelCase: List[str] = int(args.checkpointing_steps )
else:
raise ValueError(
F"""Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.""" )
else:
_UpperCamelCase: Any = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
_UpperCamelCase: Union[str, Any] = os.path.split(lowercase )[-1].split('''.''' )[0]
accelerator.init_trackers(lowercase , lowercase )
# Grab all the image filenames
_UpperCamelCase: List[str] = [os.path.join(args.data_dir , lowercase ) for fname in os.listdir(args.data_dir ) if fname.endswith('''.jpg''' )]
# Build the label correspondences
_UpperCamelCase: Optional[Any] = [extract_label(lowercase ) for fname in file_names]
_UpperCamelCase: int = list(set(lowercase ) )
id_to_label.sort()
_UpperCamelCase: Tuple = {lbl: i for i, lbl in enumerate(lowercase )}
# Set the seed before splitting the data.
np.random.seed(lowercase )
torch.manual_seed(lowercase )
torch.cuda.manual_seed_all(lowercase )
# Split our filenames between train and validation
_UpperCamelCase: List[str] = np.random.permutation(len(lowercase ) )
_UpperCamelCase: Dict = int(0.8 * len(lowercase ) )
_UpperCamelCase: Optional[int] = random_perm[:cut]
_UpperCamelCase: Optional[int] = random_perm[cut:]
# For training we use a simple RandomResizedCrop
_UpperCamelCase: List[Any] = Compose([RandomResizedCrop(lowercase , scale=(0.5, 1.0) ), ToTensor()] )
_UpperCamelCase: int = PetsDataset(
[file_names[i] for i in train_split] , image_transform=lowercase , label_to_id=lowercase )
# For evaluation, we use a deterministic Resize
_UpperCamelCase: Dict = Compose([Resize(lowercase ), ToTensor()] )
_UpperCamelCase: str = PetsDataset([file_names[i] for i in eval_split] , image_transform=lowercase , label_to_id=lowercase )
# Instantiate dataloaders.
_UpperCamelCase: str = DataLoader(lowercase , shuffle=lowercase , batch_size=lowercase , num_workers=4 )
_UpperCamelCase: str = DataLoader(lowercase , shuffle=lowercase , batch_size=lowercase , num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_UpperCamelCase: str = create_model('''resnet50d''' , pretrained=lowercase , num_classes=len(lowercase ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_UpperCamelCase: Optional[int] = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
_UpperCamelCase: Optional[Any] = False
for param in model.get_classifier().parameters():
_UpperCamelCase: Optional[int] = True
# We normalize the batches of images to be a bit faster.
_UpperCamelCase: Union[str, Any] = torch.tensor(model.default_cfg['''mean'''] )[None, :, None, None].to(accelerator.device )
_UpperCamelCase: List[Any] = torch.tensor(model.default_cfg['''std'''] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
_UpperCamelCase: List[str] = torch.optim.Adam(params=model.parameters() , lr=lr / 25 )
# Instantiate learning rate scheduler
_UpperCamelCase: Any = OneCycleLR(optimizer=lowercase , max_lr=lowercase , epochs=lowercase , steps_per_epoch=len(lowercase ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase: List[str] = accelerator.prepare(
lowercase , lowercase , lowercase , lowercase , lowercase )
# We need to keep track of how many total steps we have iterated over
_UpperCamelCase: Tuple = 0
# We also need to keep track of the starting epoch so files are named properly
_UpperCamelCase: List[str] = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(F"""Resumed from checkpoint: {args.resume_from_checkpoint}""" )
accelerator.load_state(args.resume_from_checkpoint )
_UpperCamelCase: Union[str, Any] = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
_UpperCamelCase: Union[str, Any] = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
_UpperCamelCase: Union[str, Any] = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
_UpperCamelCase: List[Any] = os.path.splitext(lowercase )[0]
if "epoch" in training_difference:
_UpperCamelCase: Any = int(training_difference.replace('''epoch_''' , '''''' ) ) + 1
_UpperCamelCase: List[str] = None
else:
_UpperCamelCase: int = int(training_difference.replace('''step_''' , '''''' ) )
_UpperCamelCase: List[Any] = resume_step // len(lowercase )
resume_step -= starting_epoch * len(lowercase )
# Now we train the model
for epoch in range(lowercase , lowercase ):
model.train()
if args.with_tracking:
_UpperCamelCase: Optional[int] = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
_UpperCamelCase: Union[str, Any] = accelerator.skip_first_batches(lowercase , lowercase )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
_UpperCamelCase: Optional[int] = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
_UpperCamelCase: Optional[Any] = {k: v.to(accelerator.device ) for k, v in batch.items()}
_UpperCamelCase: int = (batch['''image'''] - mean) / std
_UpperCamelCase: List[str] = model(lowercase )
_UpperCamelCase: Dict = torch.nn.functional.cross_entropy(lowercase , batch['''label'''] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(lowercase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(lowercase , lowercase ):
_UpperCamelCase: Any = F"""step_{overall_step}"""
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
_UpperCamelCase: List[Any] = os.path.join(args.output_dir , lowercase )
accelerator.save_state(lowercase )
model.eval()
_UpperCamelCase: List[str] = 0
_UpperCamelCase: Union[str, Any] = 0
for step, batch in enumerate(lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
_UpperCamelCase: Optional[Any] = {k: v.to(accelerator.device ) for k, v in batch.items()}
_UpperCamelCase: Union[str, Any] = (batch['''image'''] - mean) / std
with torch.no_grad():
_UpperCamelCase: List[Any] = model(lowercase )
_UpperCamelCase: Optional[Any] = outputs.argmax(dim=-1 )
_UpperCamelCase , _UpperCamelCase: Optional[int] = accelerator.gather_for_metrics((predictions, batch['''label''']) )
_UpperCamelCase: List[str] = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
_UpperCamelCase: Optional[int] = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}: {100 * eval_metric:.2f}""" )
if args.with_tracking:
accelerator.log(
{
'''accuracy''': 100 * eval_metric,
'''train_loss''': total_loss.item() / len(lowercase ),
'''epoch''': epoch,
} , step=lowercase , )
if checkpointing_steps == "epoch":
_UpperCamelCase: Tuple = F"""epoch_{epoch}"""
if args.output_dir is not None:
_UpperCamelCase: Any = os.path.join(args.output_dir , lowercase )
accelerator.save_state(lowercase )
if args.with_tracking:
accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument("--data_dir", required=True, help="The data folder on disk.")
    parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training.")
    parser.add_argument(
        "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--checkpointing_steps", type=str, default=None,
        help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
    )
    parser.add_argument(
        "--output_dir", type=str, default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint", type=str, default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--with_tracking", action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir", type=str, default="logs",
        help="Location on where to store experiment tracking logs` and relevent project information",
    )
    args = parser.parse_args()
    config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config, args)


if __name__ == "__main__":
    main()
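# Minimal runnable sketch (illustrative, not part of the script) of the core
# Accelerate pattern the training function uses: prepare everything once, then
# replace loss.backward() with accelerator.backward(loss).
def _accelerate_sketch():
    accelerator = Accelerator(cpu=True)
    model = torch.nn.Linear(4, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    dataset = torch.utils.data.TensorDataset(torch.randn(24, 4), torch.randint(0, 2, (24,)))
    loader = torch.utils.data.DataLoader(dataset, batch_size=8)
    # After prepare(), the objects are wrapped for the current device/precision setup.
    model, optimizer, loader = accelerator.prepare(model, optimizer, loader)
    for x, y in loader:
        loss = torch.nn.functional.cross_entropy(model(x), y)
        accelerator.backward(loss)  # replaces loss.backward()
        optimizer.step()
        optimizer.zero_grad()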
| 271
| 1
|
'''simple docstring'''
from __future__ import annotations
graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}
class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        """Graph is implemented as a dictionary of adjacency lists; the source
        vertex has to be defined upon initialization."""
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            raise ValueError(
                f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            )

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breath_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))
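# Illustrative alternative (not part of the original file): the same path can
# be rebuilt iteratively by walking the parent map back from the target,
# avoiding the recursion in shortest_path above.
def iterative_shortest_path(g: Graph, target_vertex: str) -> str:
    path = [target_vertex]
    while path[-1] != g.source_vertex:
        parent = g.parent.get(path[-1])
        if parent is None:
            raise ValueError(f"No path from vertex: {g.source_vertex} to vertex: {target_vertex}")
        path.append(parent)
    return "->".join(reversed(path))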
| 701
|
'''simple docstring'''
from collections import deque
from math import floor
from random import random
from time import time
class DirectedGraph:
    def __init__(self):
        self.graph = {}

    # adding vertices and edges
    # adding the weight is optional
    # handles repetition
    def add_pair(self, u, v, w=1):
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v):
            self.graph[v] = []

    def all_nodes(self):
        return list(self.graph)

    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)

    # if no destination is meant the default value is -1
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(node[1])
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    # c is the count of nodes you want; leaving it (or passing -1) picks a
    # random count between 10 and 10000
    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10_000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def in_degree(self, u):
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count

    def out_degree(self, u):
        return len(self.graph[u])
    def topological_sort(self, lowerCAmelCase__=-2):
'''simple docstring'''
snake_case_ : str = []
snake_case_ : str = []
if s == -2:
snake_case_ : Optional[Any] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : int = s
snake_case_ : Optional[int] = []
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : List[Any] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : List[str] = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(lowerCAmelCase__ ) != 0:
snake_case_ : int = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Union[str, Any] = ss
# check if se have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return sorted_nodes
    def cycle_nodes(self):
'''simple docstring'''
snake_case_ : Dict = []
snake_case_ : Any = []
snake_case_ : str = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : Optional[int] = -2
snake_case_ : Any = []
snake_case_ : List[Any] = s
snake_case_ : int = False
snake_case_ : Optional[int] = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : List[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case_ : Any = len(lowerCAmelCase__ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : Optional[int] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ : Optional[Any] = True
if len(lowerCAmelCase__ ) != 0:
snake_case_ : Optional[Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : str = False
indirect_parents.append(lowerCAmelCase__ )
snake_case_ : List[str] = s
snake_case_ : Optional[int] = ss
# check if se have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return list(lowerCAmelCase__ )
    def has_cycle(self):
'''simple docstring'''
snake_case_ : List[Any] = []
snake_case_ : Tuple = []
snake_case_ : List[str] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : str = -2
snake_case_ : List[str] = []
snake_case_ : List[Any] = s
snake_case_ : List[str] = False
snake_case_ : Dict = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : List[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case_ : Any = len(lowerCAmelCase__ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : str = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ : Tuple = True
if len(lowerCAmelCase__ ) != 0:
snake_case_ : List[Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Optional[int] = False
indirect_parents.append(lowerCAmelCase__ )
snake_case_ : int = s
snake_case_ : Union[str, Any] = ss
# check if se have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return False
    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
class Graph:
    def __init__(self):
        self.graph = {}

    # adding vertices and edges
    # adding the weight is optional
    # handles repetition
    def add_pair(self, u, v, w=1):
        # check if the u exists
        if self.graph.get(u):
            # if there already is a edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            # if there already is a edge
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            # if u does not exist
            self.graph[v] = [[w, u]]

    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)
        # the other way round
        if self.graph.get(v):
            for _ in self.graph[v]:
                if _[1] == u:
                    self.graph[v].remove(_)

    # if no destination is meant the default value is -1
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(node[1])
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    # c is the count of nodes you want; leaving it (or passing -1) picks a
    # random count between 10 and 10000
    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10_000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def degree(self, u):
        return len(self.graph[u])
    def cycle_nodes(self):
'''simple docstring'''
snake_case_ : Any = []
snake_case_ : Optional[Any] = []
snake_case_ : Optional[Any] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : Tuple = -2
snake_case_ : Optional[int] = []
snake_case_ : Tuple = s
snake_case_ : Optional[Any] = False
snake_case_ : Optional[int] = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : Optional[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case_ : Optional[int] = len(lowerCAmelCase__ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : Tuple = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ : Optional[int] = True
if len(lowerCAmelCase__ ) != 0:
snake_case_ : Optional[Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Optional[int] = False
indirect_parents.append(lowerCAmelCase__ )
snake_case_ : List[Any] = s
snake_case_ : Dict = ss
# check if se have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return list(lowerCAmelCase__ )
    def has_cycle(self):
'''simple docstring'''
snake_case_ : Optional[Any] = []
snake_case_ : int = []
snake_case_ : List[str] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : Tuple = -2
snake_case_ : int = []
snake_case_ : int = s
snake_case_ : Optional[Any] = False
snake_case_ : List[Any] = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : Union[str, Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case_ : Tuple = len(lowerCAmelCase__ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : Optional[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ : Optional[Any] = True
if len(lowerCAmelCase__ ) != 0:
snake_case_ : Tuple = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Optional[int] = False
indirect_parents.append(lowerCAmelCase__ )
snake_case_ : Union[str, Any] = s
snake_case_ : Tuple = ss
# check if se have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return False
    def all_nodes(self):
        return list(self.graph)

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
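# Self-contained sketch (illustrative, not from the original file) of the
# back-edge idea behind the cycle detectors above: during DFS, an edge that
# points at a vertex still on the recursion stack closes a cycle.
def _has_cycle_sketch(adj):
    WHITE, GRAY, BLACK = 0, 1, 2
    color = {v: WHITE for v in adj}

    def dfs(v):
        color[v] = GRAY  # v is on the current DFS path
        for u in adj.get(v, []):
            if color.get(u, WHITE) == GRAY:  # back edge -> cycle
                return True
            if color.get(u, WHITE) == WHITE and dfs(u):
                return True
        color[v] = BLACK  # fully explored
        return False

    return any(color[v] == WHITE and dfs(v) for v in adj)


# _has_cycle_sketch({0: [1], 1: [2], 2: [0]}) -> True
# _has_cycle_sketch({0: [1], 1: [2], 2: []}) -> False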
| 656
| 0
|
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m", "--pretrained_model_name_or_path", type=str, default=None, required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "-c", "--caption", type=str, default="robotic cat with wings",
        help="Text used to generate images.",
    )
    parser.add_argument(
        "-n", "--images_num", type=int, default=4, help="How much images to generate.",
    )
    parser.add_argument(
        "-s", "--seed", type=int, default=42, help="Seed for random process.",
    )
    parser.add_argument(
        "-ci", "--cuda_id", type=int, default=0, help="cuda_id.",
    )
    args = parser.parse_args()
    return args


def image_grid(imgs, rows, cols):
    if not len(imgs) == rows * cols:
        raise ValueError("The specified number of rows and columns are not correct.")

    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))
    grid_w, grid_h = grid.size

    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid


def generate_images(
    pipeline, prompt="robotic cat with wings", guidance_scale=7.5, num_inference_steps=50, num_images_per_prompt=1, seed=42,
):
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        generator=generator,
        num_images_per_prompt=num_images_per_prompt,
    ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images


args = parse_args()

# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")

pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)

if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, "unet", unet)
else:
    unet = unet.to(torch.device("cuda", args.cuda_id))
pipeline = pipeline.to(unet.device)

grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
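# Quick check (illustrative, not part of the original script) of the image_grid
# helper above: four solid-color tiles arranged into a 2x2 sheet.
def _image_grid_check():
    tiles = [Image.new("RGB", (64, 64), color) for color in ("red", "green", "blue", "white")]
    assert image_grid(tiles, rows=2, cols=2).size == (128, 128)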
| 253
|
"""simple docstring"""
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    """Calculate the entropy of a pre-softmax logit tensor, row-wise."""
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A


class DeeBertEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]

    def set_early_exit_entropy(self, x):
        if (type(x) is float) or (type(x) is int):
            for i in range(len(self.early_exit_entropy)):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x

    def init_highway_pooler(self, pooler):
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name])

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_outputs = layer_module(
                hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask
            )
            hidden_states = layer_outputs[0]

            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)

            highway_exit = self.highway[i](current_outputs)
            # logits, pooled_output

            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits)
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)

                if highway_entropy < self.early_exit_entropy[i]:
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output, i + 1)
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)

        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)
        outputs = outputs + (all_highway_exits,)

        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). ",
    BERT_START_DOCSTRING,
)
class DeeBertModel(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)

        self.init_weights()

    def init_highway_pooler(self):
        self.encoder.init_highway_pooler(self.pooler)

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        # heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters()).dtype
        )  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10_000.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[
            1:
        ]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
    def __init__(self, message, exit_layer):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!


class BertHighway(nn.Module):
    """A module to provide a shortcut from (the output of one non-final BertLayer in BertEncoder)
    to (cross-entropy computation in BertForSequenceClassification).
    """

    def __init__(self, config):
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_outputs):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output

        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output

        # Dropout and classification
        pooled_output = bmodel_output[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        return logits, pooled_output


@add_start_docstrings(
    """Bert Model (with early exiting - DeeBERT) with a classifier on top,
    also takes care of multi-layer training. """,
    BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
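# Illustrative sketch (not part of the original file) of the control flow above:
# the encoder raises HighwayException mid-stack, and the classifier's try/except
# recovers the intermediate output plus the 1-indexed layer that fired. Here the
# payload is a plain string instead of the real output tuple.
def _early_exit_demo():
    def run_layers(layer_entropies, threshold=0.5):
        for i, layer_entropy in enumerate(layer_entropies):
            if layer_entropy < threshold:
                raise HighwayException(f"confident at entropy {layer_entropy}", i + 1)
        return "ran all layers"

    try:
        run_layers([0.9, 0.7, 0.3, 0.2])
    except HighwayException as e:
        print(e.exit_layer)  # 3 -> the remaining layers' compute was skipped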
| 465
| 0
|
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
'''169M''': 12,
'''430M''': 24,
'''1B5''': 24,
'''3B''': 32,
'''7B''': 32,
'''14B''': 40,
}
HIDEN_SIZE_MAPPING = {
'''169M''': 768,
'''430M''': 1024,
'''1B5''': 2048,
'''3B''': 2560,
'''7B''': 4096,
'''14B''': 5120,
}
def convert_state_dict(state_dict):
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key and reshape
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value and reshape
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance and reshape
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")

        if name != "head.weight":
            name = "rwkv." + name

        state_dict[name] = weight
    return state_dict
def convert_rmkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50_277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")

    config = RwkvConfig(
        vocab_size=vocab_size, num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size], hidden_size=HIDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))

    if index is not None:
        index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

    # 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict
    print(
        "Cleaning up shards. This may error with an OOM error, it this is the case don't worry you still have converted the model."
    )
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
_lowerCamelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--repo_id''', default=None, type=str, required=True, help='''Repo ID from which to pull the checkpoint.'''
)
parser.add_argument(
'''--checkpoint_file''', default=None, type=str, required=True, help='''Name of the checkpoint file in the repo.'''
)
parser.add_argument(
'''--output_dir''', default=None, type=str, required=True, help='''Where to save the converted model.'''
)
parser.add_argument(
'''--tokenizer_file''',
default=None,
type=str,
help='''Path to the tokenizer file to use (if not provided, only the model is converted).''',
)
parser.add_argument(
'''--size''',
default=None,
type=str,
help='''Size of the model. Will be inferred from the `checkpoint_file` if not passed.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Push to the Hub the converted model.''',
)
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help='''Name of the pushed model on the Hub, including the username / organization.''',
)
_lowerCamelCase : Union[str, Any] = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 516
|
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ['''TF_CPP_MIN_LOG_LEVEL'''] = '''3''' # reduce the amount of console output from TF
print('''Python version:''', sys.version)
print('''OS platform:''', platform.platform())
print('''OS architecture:''', platform.machine())
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
except ImportError:
print('''Torch version:''', None)
try:
import transformers
print('''transformers version:''', transformers.__version__)
except ImportError:
print('''transformers version:''', None)
| 516
| 1
|
import enum
import shutil
import sys
TERMINAL_WIDTH , __snake_case =shutil.get_terminal_size()
CURSOR_TO_CHAR ={'UP': 'A', 'DOWN': 'B', 'RIGHT': 'C', 'LEFT': 'D'}
class lowerCAmelCase__ ( enum.Enum ):
A_ : Optional[Any] = 0
A_ : List[str] = 1
def forceWrite( content , end="" ):
    '''simple docstring'''
    sys.stdout.write(str(content ) + end )
    sys.stdout.flush()
def writeColor( content , color , end="" ):
    '''simple docstring'''
    forceWrite(F'''\u001b[{color}m{content}\u001b[0m''' , end )
def reset_cursor( ):
    '''simple docstring'''
    forceWrite('\r' )
def move_cursor( num_lines , direction ):
    '''simple docstring'''
    # ANSI escape: "\033[<n><A|B|C|D>" moves the cursor <n> cells up/down/right/left.
    forceWrite(F'''\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}''' )
def clear_line( ):
    '''simple docstring'''
    forceWrite(' ' * TERMINAL_WIDTH )
    reset_cursor()
def linebreak( ):
    '''simple docstring'''
    reset_cursor()
    forceWrite('-' * TERMINAL_WIDTH )
| 106
|
'''simple docstring'''
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
_lowercase = parse(importlib.metadata.version('torch'))
def compare_versions( library_or_version : Union[str, Version] , operation : str , requirement_version : str ) ->bool:
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f"""`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}""" )
    # Map the textual operator (e.g. ">=") onto its comparison function.
    snake_case = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version , str ):
        library_or_version = parse(importlib.metadata.version(library_or_version ) )
    return snake_case(library_or_version , parse(requirement_version ) )
def is_torch_version( operation : str , version : str ) ->bool:
    return compare_versions(_lowercase , operation , version )
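# Example: is_torch_version(">=", "1.12.0") is True whenever the installed torch is at least 1.12.0.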
| 342
| 0
|
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_A = {
"configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
"tokenization_cpmant": ["CpmAntTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
"CpmAntForCausalLM",
"CpmAntModel",
"CpmAntPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
_A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 294
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def lowercase_ ( A__ , A__ , A__ , A__ , A__ , A__ = None , ) -> List[Any]:
"""simple docstring"""
snake_case = {}
if train_file is not None:
snake_case = [train_file]
if eval_file is not None:
snake_case = [eval_file]
if test_file is not None:
snake_case = [test_file]
snake_case = datasets.load_dataset("csv" , data_files=A__ )
snake_case = list(ds[list(files.keys() )[0]].features.keys() )
snake_case = features_name.pop(A__ )
snake_case = list(set(ds[list(files.keys() )[0]][label_name] ) )
snake_case = {label: i for i, label in enumerate(A__ )}
snake_case = tokenizer.model_input_names
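    # `input_names` is typically ["input_ids", "attention_mask"] (BERT-like models also add "token_type_ids").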
snake_case = {}
if len(A__ ) == 1:
for k in files.keys():
snake_case = ds[k].map(
lambda A__ : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=A__ , max_length=A__ , padding="max_length" ) , batched=A__ , )
elif len(A__ ) == 2:
for k in files.keys():
snake_case = ds[k].map(
lambda A__ : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=A__ , max_length=A__ , padding="max_length" , ) , batched=A__ , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
snake_case = {k: v for k, v in ex.items() if k in input_names}
snake_case = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
snake_case = {k: v for k, v in ex.items() if k in input_names}
snake_case = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
snake_case = {k: v for k, v in ex.items() if k in input_names}
snake_case = labelaid[ex[label_name]]
yield (d, label)
snake_case = (
tf.data.Dataset.from_generator(
            A__ , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
snake_case = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
snake_case = (
tf.data.Dataset.from_generator(
            A__ , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
snake_case = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
snake_case = (
tf.data.Dataset.from_generator(
            A__ , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
snake_case = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
_A = logging.getLogger(__name__)
@dataclass
class lowerCamelCase :
UpperCAmelCase__ : int = field(metadata={"help": "Which column contains the label"} )
UpperCAmelCase__ : str = field(default=A_ , metadata={"help": "The path of the training file"} )
UpperCAmelCase__ : Optional[str] = field(default=A_ , metadata={"help": "The path of the development file"} )
UpperCAmelCase__ : Optional[str] = field(default=A_ , metadata={"help": "The path of the test file"} )
UpperCAmelCase__ : int = field(
default=1_28 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
UpperCAmelCase__ : bool = field(
default=A_ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
@dataclass
class lowerCamelCase :
UpperCAmelCase__ : str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
UpperCAmelCase__ : Optional[str] = field(
default=A_ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
UpperCAmelCase__ : Optional[str] = field(
default=A_ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
UpperCAmelCase__ : bool = field(default=A_ , metadata={"help": "Set this flag to use fast tokenization."} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
UpperCAmelCase__ : Optional[str] = field(
default=A_ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
def lowercase_ ( ) -> Dict:
"""simple docstring"""
snake_case = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
snake_case , snake_case , snake_case = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.info(
F'n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '
        F'16-bits training: {training_args.fp16}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
snake_case = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
snake_case , snake_case , snake_case , snake_case = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=A__ , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
snake_case = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(A__ ) , labelaid=A__ , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="text-classification" , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
snake_case = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool(".bin" in model_args.model_name_or_path ) , config=A__ , cache_dir=model_args.cache_dir , )
def compute_metrics(A__ ) -> Dict:
snake_case = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
snake_case = TFTrainer(
model=A__ , args=A__ , train_dataset=A__ , eval_dataset=A__ , compute_metrics=A__ , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
snake_case = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
snake_case = trainer.evaluate()
snake_case = os.path.join(training_args.output_dir , "eval_results.txt" )
with open(A__ , "w" ) as writer:
logger.info("***** Eval results *****" )
for key, value in result.items():
logger.info(F' {key} = {value}' )
writer.write(F'{key} = {value}\n' )
results.update(A__ )
return results
if __name__ == "__main__":
main()
| 294
| 1
|
'''simple docstring'''
lowerCAmelCase_ = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
lowerCAmelCase_ = [{'type': 'code', 'content': INSTALL_CONTENT}]
lowerCAmelCase_ = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 531
|
def pancake_sort( arr : list ):
    '''simple docstring'''
    cur = len(arr )
    while cur > 1:
        # Find the maximum number in arr
        mi = arr.index(max(arr[0:cur] ) )
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr )]
        # Reverse whole list
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr )]
        cur -= 1
    return arr
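# Example: pancake_sort([3, 1, 2]) returns [1, 2, 3]; each pass flips the current maximum
# to the front, then flips it into its final position.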
if __name__ == "__main__":
__snake_case : List[Any] =input('Enter numbers separated by a comma:\n').strip()
__snake_case : Dict =[int(item) for item in user_input.split(',')]
print(pancake_sort(unsorted))
| 647
| 0
|
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _A ( unittest.TestCase ):
'''simple docstring'''
@property
def snake_case_ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
snake_case : Optional[Any] = UNetaDModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") ,up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") ,)
return model
@property
def snake_case_ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
snake_case : str = VQModel(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=3 ,)
return model
@property
def snake_case_ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
snake_case : int = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
return CLIPTextModel(SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : List[Any] = self.dummy_uncond_unet
snake_case : Union[str, Any] = DDIMScheduler()
snake_case : int = self.dummy_vq_model
snake_case : Union[str, Any] = LDMPipeline(unet=SCREAMING_SNAKE_CASE_ ,vqvae=SCREAMING_SNAKE_CASE_ ,scheduler=SCREAMING_SNAKE_CASE_ )
ldm.to(SCREAMING_SNAKE_CASE_ )
ldm.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
snake_case : Dict = torch.manual_seed(0 )
snake_case : Optional[int] = ldm(generator=SCREAMING_SNAKE_CASE_ ,num_inference_steps=2 ,output_type="""numpy""" ).images
snake_case : Any = torch.manual_seed(0 )
snake_case : List[str] = ldm(generator=SCREAMING_SNAKE_CASE_ ,num_inference_steps=2 ,output_type="""numpy""" ,return_dict=SCREAMING_SNAKE_CASE_ )[0]
snake_case : int = image[0, -3:, -3:, -1]
snake_case : List[str] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case : int = np.array([0.85_12, 0.8_18, 0.64_11, 0.68_08, 0.44_65, 0.56_18, 0.46, 0.62_31, 0.51_72] )
snake_case : Optional[Any] = 1E-2 if torch_device != """mps""" else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class _A ( unittest.TestCase ):
'''simple docstring'''
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Optional[int] = LDMPipeline.from_pretrained("""CompVis/ldm-celebahq-256""" )
ldm.to(SCREAMING_SNAKE_CASE_ )
ldm.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
snake_case : str = torch.manual_seed(0 )
snake_case : Dict = ldm(generator=SCREAMING_SNAKE_CASE_ ,num_inference_steps=5 ,output_type="""numpy""" ).images
snake_case : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
snake_case : List[str] = np.array([0.43_99, 0.4_49_75, 0.4_68_25, 0.4_74, 0.43_59, 0.45_81, 0.4_50_95, 0.43_41, 0.44_47] )
snake_case : Optional[Any] = 1E-2 if torch_device != """mps""" else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 315
|
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase : Optional[int] = logging.get_logger(__name__)
def lowercase ( __A : str ) -> List[Any]:
'''simple docstring'''
snake_case : Optional[int] = torch.load(__A , map_location="""cpu""" )
if "model" in sd.keys():
snake_case : Any = torch.load(__A , map_location="""cpu""" )["""model"""]
# pop unnecessary weights
snake_case : Optional[Any] = [
"""decoder.version""",
"""decoder.output_projection.weight""",
]
for key in keys_to_delete:
if key in sd:
sd.pop(__A )
snake_case : List[Any] = {
"""decoder.project_in_dim.weight""": """decoder.project_in.weight""",
"""decoder.project_out_dim.weight""": """decoder.project_out.weight""",
"""decoder.layer_norm.weight""": """decoder.final_layer_norm.weight""",
"""decoder.layer_norm.bias""": """decoder.final_layer_norm.bias""",
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
snake_case : int = sd.pop(__A )
snake_case : Optional[int] = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
snake_case : List[str] = sd[key]
# We split QKV in separate Q,K,V
snake_case : Dict = key.replace(""".qkv_proj.""" , """.q_proj.""" )
snake_case : Any = key.replace(""".qkv_proj.""" , """.k_proj.""" )
snake_case : List[str] = key.replace(""".qkv_proj.""" , """.v_proj.""" )
snake_case : List[Any] = value.shape[0]
assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` stores the QKV weight separated as K, V, Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
snake_case , snake_case , snake_case : str = torch.split(__A , depth // 3 , dim=0 )
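            # e.g. a fused (3 * hidden_size, hidden_size) qkv projection yields three (hidden_size, hidden_size) matrices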
snake_case : Tuple = q
snake_case : List[Any] = k
snake_case : List[Any] = v
del sd[key]
return sd
@torch.no_grad()
def lowercase ( __A : Optional[Any] , __A : Tuple , __A : List[str]=None ) -> Optional[int]:
'''simple docstring'''
snake_case : Any = load_checkpoint(__A )
if config is not None:
snake_case : List[Any] = OPTConfig.from_pretrained(__A )
else:
snake_case : Any = OPTConfig()
snake_case : Union[str, Any] = OPTModel(__A ).half().eval()
model.load_state_dict(__A )
# Check results
Path(__A ).mkdir(exist_ok=__A )
model.save_pretrained(__A )
if __name__ == "__main__":
__lowercase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--fairseq_path''',
type=str,
help=(
'''path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'''
''' https://huggingface.co/models?other=opt_metasq'''
),
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--hf_config''', default=None, type=str, help='''Define HF config.''')
__lowercase : Any = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 315
| 1
|
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-classification/requirements.txt")
SCREAMING_SNAKE_CASE__ = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
SCREAMING_SNAKE_CASE__ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def lowercase ( a ):
'''simple docstring'''
with open(_lowerCamelCase , "rb" ) as f:
SCREAMING_SNAKE_CASE_ :str = Image.open(_lowerCamelCase )
return im.convert("RGB" )
@dataclass
class _UpperCAmelCase :
lowerCamelCase_ : Optional[str] = field(
default=SCREAMING_SNAKE_CASE_ , metadata={
"""help""": """Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."""
} , )
lowerCamelCase_ : Optional[str] = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
lowerCamelCase_ : Optional[str] = field(default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """A folder containing the training data."""} )
lowerCamelCase_ : Optional[str] = field(default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """A folder containing the validation data."""} )
lowerCamelCase_ : Optional[float] = field(
default=0.15 , metadata={"""help""": """Percent to split off of train for validation."""} )
lowerCamelCase_ : Optional[int] = field(
default=SCREAMING_SNAKE_CASE_ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
lowerCamelCase_ : Optional[int] = field(
default=SCREAMING_SNAKE_CASE_ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
def _snake_case ( self : List[Any]):
if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
raise ValueError(
"You must specify either a dataset name from the hub or a train and/or validation directory.")
@dataclass
class _UpperCAmelCase :
lowerCamelCase_ : str = field(
default="""google/vit-base-patch16-224-in21k""" , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} , )
lowerCamelCase_ : Optional[str] = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(SCREAMING_SNAKE_CASE_ )} , )
lowerCamelCase_ : Optional[str] = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
lowerCamelCase_ : Optional[str] = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from s3"""} )
lowerCamelCase_ : str = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
lowerCamelCase_ : str = field(default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """Name or path of preprocessor config."""} )
lowerCamelCase_ : bool = field(
default=SCREAMING_SNAKE_CASE_ , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
lowerCamelCase_ : bool = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """Will enable to load a pretrained model whose head dimensions are different."""} , )
def lowercase ( a ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ :Any = torch.stack([example["pixel_values"] for example in examples] )
SCREAMING_SNAKE_CASE_ :Tuple = torch.tensor([example["labels"] for example in examples] )
return {"pixel_values": pixel_values, "labels": labels}
def lowercase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ :Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
SCREAMING_SNAKE_CASE_ :Union[str, Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
SCREAMING_SNAKE_CASE_ :str = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_image_classification" , _lowerCamelCase , _lowerCamelCase )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_ :int = training_args.get_process_log_level()
logger.setLevel(_lowerCamelCase )
transformers.utils.logging.set_verbosity(_lowerCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
SCREAMING_SNAKE_CASE_ :Union[str, Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
SCREAMING_SNAKE_CASE_ :int = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
if data_args.dataset_name is not None:
SCREAMING_SNAKE_CASE_ :List[str] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task="image-classification" , use_auth_token=True if model_args.use_auth_token else None , )
else:
SCREAMING_SNAKE_CASE_ :Any = {}
if data_args.train_dir is not None:
SCREAMING_SNAKE_CASE_ :Dict = os.path.join(data_args.train_dir , "**" )
if data_args.validation_dir is not None:
SCREAMING_SNAKE_CASE_ :int = os.path.join(data_args.validation_dir , "**" )
SCREAMING_SNAKE_CASE_ :List[str] = load_dataset(
"imagefolder" , data_files=_lowerCamelCase , cache_dir=model_args.cache_dir , task="image-classification" , )
# If we don't have a validation split, split off a percentage of train as validation.
SCREAMING_SNAKE_CASE_ :Tuple = None if """validation""" in dataset.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , _lowerCamelCase ) and data_args.train_val_split > 0.0:
SCREAMING_SNAKE_CASE_ :Tuple = dataset["""train"""].train_test_split(data_args.train_val_split )
SCREAMING_SNAKE_CASE_ :Dict = split["""train"""]
SCREAMING_SNAKE_CASE_ :List[Any] = split["""test"""]
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
SCREAMING_SNAKE_CASE_ :Dict = dataset["""train"""].features["""labels"""].names
SCREAMING_SNAKE_CASE_ :Any = {}, {}
for i, label in enumerate(_lowerCamelCase ):
SCREAMING_SNAKE_CASE_ :str = str(_lowerCamelCase )
SCREAMING_SNAKE_CASE_ :List[Any] = label
# Load the accuracy metric from the datasets package
SCREAMING_SNAKE_CASE_ :int = evaluate.load("accuracy" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(a ):
return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids )
SCREAMING_SNAKE_CASE_ :str = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(_lowerCamelCase ) , labelaid=_lowerCamelCase , idalabel=_lowerCamelCase , finetuning_task="image-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
SCREAMING_SNAKE_CASE_ :Tuple = AutoModelForImageClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=_lowerCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
SCREAMING_SNAKE_CASE_ :Any = AutoImageProcessor.from_pretrained(
model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
SCREAMING_SNAKE_CASE_ :Tuple = image_processor.size["""shortest_edge"""]
else:
SCREAMING_SNAKE_CASE_ :List[Any] = (image_processor.size["""height"""], image_processor.size["""width"""])
SCREAMING_SNAKE_CASE_ :int = Normalize(mean=image_processor.image_mean , std=image_processor.image_std )
SCREAMING_SNAKE_CASE_ :Optional[Any] = Compose(
[
RandomResizedCrop(_lowerCamelCase ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
SCREAMING_SNAKE_CASE_ :int = Compose(
[
Resize(_lowerCamelCase ),
CenterCrop(_lowerCamelCase ),
ToTensor(),
normalize,
] )
def train_transforms(a ):
SCREAMING_SNAKE_CASE_ :List[Any] = [
_train_transforms(pil_img.convert("RGB" ) ) for pil_img in example_batch["""image"""]
]
return example_batch
def val_transforms(a ):
SCREAMING_SNAKE_CASE_ :Any = [_val_transforms(pil_img.convert("RGB" ) ) for pil_img in example_batch["""image"""]]
return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError("--do_train requires a train dataset" )
if data_args.max_train_samples is not None:
SCREAMING_SNAKE_CASE_ :int = (
dataset["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
dataset["train"].set_transform(_lowerCamelCase )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError("--do_eval requires a validation dataset" )
if data_args.max_eval_samples is not None:
SCREAMING_SNAKE_CASE_ :List[str] = (
dataset["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
dataset["validation"].set_transform(_lowerCamelCase )
    # Initialize our trainer
SCREAMING_SNAKE_CASE_ :List[str] = Trainer(
model=_lowerCamelCase , args=_lowerCamelCase , train_dataset=dataset["train"] if training_args.do_train else None , eval_dataset=dataset["validation"] if training_args.do_eval else None , compute_metrics=_lowerCamelCase , tokenizer=_lowerCamelCase , data_collator=_lowerCamelCase , )
# Training
if training_args.do_train:
SCREAMING_SNAKE_CASE_ :List[Any] = None
if training_args.resume_from_checkpoint is not None:
SCREAMING_SNAKE_CASE_ :List[str] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
SCREAMING_SNAKE_CASE_ :Union[str, Any] = last_checkpoint
SCREAMING_SNAKE_CASE_ :Optional[Any] = trainer.train(resume_from_checkpoint=_lowerCamelCase )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
SCREAMING_SNAKE_CASE_ :Any = trainer.evaluate()
trainer.log_metrics("eval" , _lowerCamelCase )
trainer.save_metrics("eval" , _lowerCamelCase )
# Write model card and (optionally) push to hub
SCREAMING_SNAKE_CASE_ :str = {
"""finetuned_from""": model_args.model_name_or_path,
"""tasks""": """image-classification""",
"""dataset""": data_args.dataset_name,
"""tags""": ["""image-classification""", """vision"""],
}
if training_args.push_to_hub:
trainer.push_to_hub(**_lowerCamelCase )
else:
trainer.create_model_card(**_lowerCamelCase )
if __name__ == "__main__":
main()
| 631
|
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : str = ["image_processor", "tokenizer"]
_UpperCamelCase : Union[str, Any] = "LayoutLMv2ImageProcessor"
_UpperCamelCase : Optional[Any] = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")
def __init__( self , a__=None , a__=None , **a__ ):
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , a__ , )
_lowerCAmelCase : Optional[Any] = kwargs.pop("""feature_extractor""" )
_lowerCAmelCase : Any = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(a__ , a__ )
def __call__( self , a__ , a__ = None , a__ = None , a__ = None , a__ = None , a__ = True , a__ = False , a__ = None , a__ = None , a__ = 0 , a__ = None , a__ = None , a__ = None , a__ = False , a__ = False , a__ = False , a__ = False , a__ = True , a__ = None , **a__ , ):
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
"""You cannot provide bounding boxes """
"""if you initialized the image processor with apply_ocr set to True.""" )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
"""You cannot provide word labels if you initialized the image processor with apply_ocr set to True.""" )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError("""You cannot return overflowing tokens without returning the offsets mapping.""" )
# first, apply the image processor
_lowerCAmelCase : Optional[Any] = self.image_processor(images=a__ , return_tensors=a__ )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(a__ , a__ ):
_lowerCAmelCase : Optional[Any] = [text] # add batch dimension (as the image processor always adds a batch dimension)
_lowerCAmelCase : List[Any] = features["""words"""]
_lowerCAmelCase : int = self.tokenizer(
text=text if text is not None else features["""words"""] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["""boxes"""] , word_labels=a__ , add_special_tokens=a__ , padding=a__ , truncation=a__ , max_length=a__ , stride=a__ , pad_to_multiple_of=a__ , return_token_type_ids=a__ , return_attention_mask=a__ , return_overflowing_tokens=a__ , return_special_tokens_mask=a__ , return_offsets_mapping=a__ , return_length=a__ , verbose=a__ , return_tensors=a__ , **a__ , )
# add pixel values
_lowerCAmelCase : Dict = features.pop("""pixel_values""" )
if return_overflowing_tokens is True:
_lowerCAmelCase : Any = self.get_overflowing_images(a__ , encoded_inputs["""overflow_to_sample_mapping"""] )
_lowerCAmelCase : Dict = images
return encoded_inputs
def __A ( self , a__ , a__ ):
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
_lowerCAmelCase : List[Any] = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(a__ ) != len(a__ ):
raise ValueError(
"""Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"""
F" {len(a__ )} and {len(a__ )}" )
return images_with_overflow
def __A ( self , *a__ , **a__ ):
return self.tokenizer.batch_decode(*a__ , **a__ )
def __A ( self , *a__ , **a__ ):
return self.tokenizer.decode(*a__ , **a__ )
@property
def __A ( self ):
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def __A ( self ):
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , a__ , )
return self.image_processor_class
@property
def __A ( self ):
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , a__ , )
return self.image_processor
| 213
| 0
|
"""simple docstring"""
def exchange_sort(numbers: list[int] ) ->list[int]:
    """simple docstring"""
    n = len(numbers )
    for i in range(n ):
        for j in range(i + 1 , n ):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers
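# Example: exchange_sort([5, 4, 3, 2, 1]) returns [1, 2, 3, 4, 5] (O(n^2) comparisons).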
if __name__ == "__main__":
a : Dict = input("""Enter numbers separated by a comma:\n""").strip()
a : List[str] = [int(item) for item in user_input.split(""",""")]
print(exchange_sort(unsorted))
| 85
|
"""simple docstring"""
def exchange_sort(numbers: list[int] ) ->list[int]:
    """simple docstring"""
    n = len(numbers )
    for i in range(n ):
        for j in range(i + 1 , n ):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers
if __name__ == "__main__":
a : Dict = input("""Enter numbers separated by a comma:\n""").strip()
a : List[str] = [int(item) for item in user_input.split(""",""")]
print(exchange_sort(unsorted))
| 85
| 1
|
"""simple docstring"""
import math
def UpperCamelCase (SCREAMING_SNAKE_CASE ):
UpperCamelCase : Optional[int] = []
UpperCamelCase : Optional[int] = 2
UpperCamelCase : Tuple = int(math.sqrt(SCREAMING_SNAKE_CASE ) ) # Size of every segment
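    # Phase 1: run a plain sieve on [2, sqrt(n)]; these base primes suffice to mark every composite below n.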
UpperCamelCase : Union[str, Any] = [True] * (end + 1)
UpperCamelCase : Any = []
while start <= end:
if temp[start] is True:
in_prime.append(SCREAMING_SNAKE_CASE )
for i in range(start * start , end + 1 , SCREAMING_SNAKE_CASE ):
UpperCamelCase : Union[str, Any] = False
start += 1
prime += in_prime
UpperCamelCase : List[str] = end + 1
UpperCamelCase : Optional[Any] = min(2 * end , SCREAMING_SNAKE_CASE )
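    # Phase 2: sieve each segment [low, high] of width ~sqrt(n) using only the base primes found above.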
while low <= n:
UpperCamelCase : List[str] = [True] * (high - low + 1)
for each in in_prime:
UpperCamelCase : Any = math.floor(low / each ) * each
if t < low:
t += each
for j in range(SCREAMING_SNAKE_CASE , high + 1 , SCREAMING_SNAKE_CASE ):
UpperCamelCase : int = False
for j in range(len(SCREAMING_SNAKE_CASE ) ):
if temp[j] is True:
prime.append(j + low )
UpperCamelCase : List[str] = high + 1
UpperCamelCase : Any = min(high + end , SCREAMING_SNAKE_CASE )
return prime
print(sieve(1_0**6))
| 102
|
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
"E": 12.70,
"T": 9.06,
"A": 8.17,
"O": 7.51,
"I": 6.97,
"N": 6.75,
"S": 6.33,
"H": 6.09,
"R": 5.99,
"D": 4.25,
"L": 4.03,
"C": 2.78,
"U": 2.76,
"M": 2.41,
"W": 2.36,
"F": 2.23,
"G": 2.02,
"Y": 1.97,
"P": 1.93,
"B": 1.29,
"V": 0.98,
"K": 0.77,
"J": 0.15,
"X": 0.15,
"Q": 0.10,
"Z": 0.07,
}
__lowerCAmelCase : Tuple = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
__lowerCAmelCase : int = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def get_letter_count( message : str ) -> dict[str, int]:
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count
def get_item_at_index_zero( x : tuple ) -> str:
    return x[0]
def get_frequency_order( message : str ) -> str:
    letter_to_freq = get_letter_count(message )
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter )
    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        # Within one frequency bucket, order letters by their position in ETAOIN.
        freq_to_letter[freq].sort(key=ETAOIN.find , reverse=True )
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq] )
    freq_pairs = list(freq_to_letter_str.items() )
    freq_pairs.sort(key=get_item_at_index_zero , reverse=True )
    freq_order: list[str] = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order )
def english_freq_match_score( message : str ) -> int:
    freq_order = get_frequency_order(message )
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
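# Example: english_freq_match_score on ordinary English text tends toward the maximum of 12
# (the six most-common and six least-common letters line up with ETAOIN's extremes).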
if __name__ == "__main__":
import doctest
doctest.testmod()
| 509
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
__UpperCAmelCase ={
"""configuration_speech_to_text""": ["""SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Speech2TextConfig"""],
"""processing_speech_to_text""": ["""Speech2TextProcessor"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase =["""Speech2TextTokenizer"""]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase =["""Speech2TextFeatureExtractor"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase =[
"""TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFSpeech2TextForConditionalGeneration""",
"""TFSpeech2TextModel""",
"""TFSpeech2TextPreTrainedModel""",
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase =[
"""SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Speech2TextForConditionalGeneration""",
"""Speech2TextModel""",
"""Speech2TextPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
__UpperCAmelCase =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 700
|
"""simple docstring"""
def __a ( A = "The quick brown fox jumps over the lazy dog" , ) -> bool:
'''simple docstring'''
A__ = set()
# Replace all the whitespace in our sentence
A__ = input_str.replace(" " , "" )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(A ) == 26
def __a ( A = "The quick brown fox jumps over the lazy dog" , ) -> bool:
'''simple docstring'''
A__ = [False] * 26
for char in input_str:
if char.islower():
A__ = True
elif char.isupper():
A__ = True
return all(A )
def __a ( A = "The quick brown fox jumps over the lazy dog" , ) -> bool:
'''simple docstring'''
return len({char for char in input_str.lower() if char.isalpha()} ) == 26
def __a ( ) -> None:
'''simple docstring'''
from timeit import timeit
A__ = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
print(timeit("is_pangram()" , setup=A ) )
print(timeit("is_pangram_faster()" , setup=A ) )
print(timeit("is_pangram_fastest()" , setup=A ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
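# Example: is_pangram("Waltz, bad nymph, for quick jigs vex") returns True.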
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 261
| 0
|
"""simple docstring"""
g = 9.8_0665
def archimedes_principle( fluid_density : float , volume : float , gravity : float = g ) ->float:
    """simple docstring"""
    if fluid_density <= 0:
        raise ValueError('Impossible fluid density' )
    if volume < 0:
        raise ValueError('Impossible Object volume' )
    if gravity <= 0:
        raise ValueError('Impossible Gravity' )
    return fluid_density * gravity * volume
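# Example: archimedes_principle(fluid_density=997, volume=0.5) ~= 4888.6 N (buoyant force in water).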
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
| 93
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
__A = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["""MLukeTokenizer"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
__A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 93
| 1
|
def heaps( arr : list ) -> list:
    '''simple docstring'''
    if len(arr ) <= 1:
        return [tuple(arr )]
    res = []
    def generate(k : int , arr : list ):
        if k == 1:
            res.append(tuple(arr[:] ) )
            return
        generate(k - 1 , arr )
        for i in range(k - 1 ):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1 , arr )
    generate(len(arr ) , arr )
    return res
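# Example: heaps([1, 2, 3]) yields all 3! = 6 permutations of the input, each as a tuple.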
if __name__ == "__main__":
lowercase__ : int = input("Enter numbers separated by a comma:\n").strip()
lowercase__ : Dict = [int(item) for item in user_input.split(",")]
print(heaps(arr))
| 451
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase__ : Tuple = logging.get_logger(__name__)
lowercase__ : int = {
"microsoft/table-transformer-detection": (
"https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
),
}
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
_snake_case = 'table-transformer'
_snake_case = ['past_key_values']
_snake_case = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=100 , SCREAMING_SNAKE_CASE_=6 , SCREAMING_SNAKE_CASE_=2048 , SCREAMING_SNAKE_CASE_=8 , SCREAMING_SNAKE_CASE_=6 , SCREAMING_SNAKE_CASE_=2048 , SCREAMING_SNAKE_CASE_=8 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_="relu" , SCREAMING_SNAKE_CASE_=256 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=1.0 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_="sine" , SCREAMING_SNAKE_CASE_="resnet50" , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.1 , **SCREAMING_SNAKE_CASE_ , )-> Tuple:
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
__UpperCamelCase = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
__UpperCamelCase = backbone_config.get('''model_type''' )
__UpperCamelCase = CONFIG_MAPPING[backbone_model_type]
__UpperCamelCase = config_class.from_dict(SCREAMING_SNAKE_CASE_ )
# set timm attributes to None
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None, None, None
__UpperCamelCase = use_timm_backbone
__UpperCamelCase = backbone_config
__UpperCamelCase = num_channels
__UpperCamelCase = num_queries
__UpperCamelCase = d_model
__UpperCamelCase = encoder_ffn_dim
__UpperCamelCase = encoder_layers
__UpperCamelCase = encoder_attention_heads
__UpperCamelCase = decoder_ffn_dim
__UpperCamelCase = decoder_layers
__UpperCamelCase = decoder_attention_heads
__UpperCamelCase = dropout
__UpperCamelCase = attention_dropout
__UpperCamelCase = activation_dropout
__UpperCamelCase = activation_function
__UpperCamelCase = init_std
__UpperCamelCase = init_xavier_std
__UpperCamelCase = encoder_layerdrop
__UpperCamelCase = decoder_layerdrop
__UpperCamelCase = encoder_layers
__UpperCamelCase = auxiliary_loss
__UpperCamelCase = position_embedding_type
__UpperCamelCase = backbone
__UpperCamelCase = use_pretrained_backbone
__UpperCamelCase = dilation
# Hungarian matcher
__UpperCamelCase = class_cost
__UpperCamelCase = bbox_cost
__UpperCamelCase = giou_cost
# Loss coefficients
__UpperCamelCase = mask_loss_coefficient
__UpperCamelCase = dice_loss_coefficient
__UpperCamelCase = bbox_loss_coefficient
__UpperCamelCase = giou_loss_coefficient
__UpperCamelCase = eos_coefficient
super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
@property
def A__ ( self )-> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def A__ ( self )-> int:
'''simple docstring'''
return self.d_model
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
_snake_case = version.parse('1.11' )
@property
def A__ ( self )-> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
] )
@property
def A__ ( self )-> float:
'''simple docstring'''
return 1E-5
@property
def A__ ( self )-> int:
'''simple docstring'''
return 12
| 451
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE : Tuple = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Tuple = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : int = [
"PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
"PLBartForCausalLM",
"PLBartForConditionalGeneration",
"PLBartForSequenceClassification",
"PLBartModel",
"PLBartPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 400
|
'''simple docstring'''
def logical_left_shift( number : int , shift_amount : int ):
    '''simple docstring'''
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers" )
    binary_number = str(bin(number ) )
    binary_number += "0" * shift_amount
    return binary_number
def logical_right_shift( number : int , shift_amount : int ):
    '''simple docstring'''
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers" )
    binary_number = str(bin(number ) )[2:]
    if shift_amount >= len(binary_number ):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number ) - shift_amount]
    return "0b" + shifted_binary_number
def arithmetic_right_shift( number : int , shift_amount : int ):
    '''simple docstring'''
    if number >= 0:  # Get binary representation of positive number
        binary_number = "0" + str(bin(number ) ).strip("-" )[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number )[3:] )  # Find 2's complement of number
        binary_number = bin(abs(number ) - (1 << binary_number_length) )[3:]
        binary_number = (
            "1" + "0" * (binary_number_length - len(binary_number )) + binary_number
        )
    if shift_amount >= len(binary_number ):
        return "0b" + binary_number[0] * len(binary_number )
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number ) - shift_amount]
    )
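# Examples: logical_left_shift(1, 1) == "0b10"; logical_right_shift(8, 2) == "0b10";
# arithmetic_right_shift(-8, 2) == "0b11110" (the sign bit is replicated in two's complement).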
if __name__ == "__main__":
import doctest
doctest.testmod()
| 400
| 1
|
'''simple docstring'''
UNIT_SYMBOL = {
    """meter""": """m""",
    """kilometer""": """km""",
    """megametre""": """Mm""",
    """gigametre""": """Gm""",
    """terametre""": """Tm""",
    """petametre""": """Pm""",
    """exametre""": """Em""",
    """zettametre""": """Zm""",
    """yottametre""": """Ym""",
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
    """m""": 0,
    """km""": 3,
    """Mm""": 6,
    """Gm""": 9,
    """Tm""": 12,
    """Pm""": 15,
    """Em""": 18,
    """Zm""": 21,
    """Ym""": 24,
}
def length_conversion( value , from_type , to_type ):
    '''simple docstring'''
    from_sanitized = from_type.lower().strip('''s''' )
    to_sanitized = to_type.lower().strip('''s''' )
    from_sanitized = UNIT_SYMBOL.get(from_sanitized , from_sanitized )
    to_sanitized = UNIT_SYMBOL.get(to_sanitized , to_sanitized )
    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            F'Invalid \'from_type\' value: {from_type!r}.\n'
            F'Conversion abbreviations are: {", ".join(METRIC_CONVERSION )}'
        )
        raise ValueError(msg )
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            F'Invalid \'to_type\' value: {to_type!r}.\n'
            F'Conversion abbreviations are: {", ".join(METRIC_CONVERSION )}'
        )
        raise ValueError(msg )
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)
    return value * pow(10 , exponent )
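# Example: length_conversion(4, "kilometer", "meter") returns 4000.0 (a factor of 10**(3 - 0)).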
if __name__ == "__main__":
from doctest import testmod
testmod()
| 721
|
'''simple docstring'''
import math
import sys
def lowerCAmelCase_ ( snake_case__ ):
'''simple docstring'''
A : Dict = ''''''
try:
with open(snake_case__ , '''rb''' ) as binary_file:
A : Optional[Any] = binary_file.read()
for dat in data:
A : Union[str, Any] = F'{dat:08b}'
result += curr_byte
return result
except OSError:
print('''File not accessible''' )
sys.exit()
def lowerCAmelCase_ ( snake_case__ ):
'''simple docstring'''
A : Optional[int] = {'''0''': '''0''', '''1''': '''1'''}
A, A : Union[str, Any] = '''''', ''''''
A : str = len(snake_case__ )
for i in range(len(snake_case__ ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
A : Dict = lexicon[curr_string]
result += last_match_id
A : Any = last_match_id + '''0'''
        if math.log2(snake_case__ ).is_integer():
A : Optional[int] = {}
for curr_key in list(snake_case__ ):
A : Any = lexicon.pop(snake_case__ )
A : List[str] = new_lex
A : Dict = last_match_id + '''1'''
index += 1
A : List[str] = ''''''
return result
def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
'''simple docstring'''
A : Tuple = 8
try:
with open(snake_case__ , '''wb''' ) as opened_file:
A : List[Any] = [
to_write[i : i + byte_length]
for i in range(0 , len(snake_case__ ) , snake_case__ )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('''10000000''' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(snake_case__ , 2 ).to_bytes(1 , byteorder='''big''' ) )
except OSError:
print('''File not accessible''' )
sys.exit()
def lowerCAmelCase_ ( snake_case__ ):
'''simple docstring'''
A : Optional[int] = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
A : Union[str, Any] = data_bits[counter:]
A : Tuple = data_bits[counter + 1 :]
return data_bits
def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
'''simple docstring'''
A : int = read_file_binary(snake_case__ )
A : Dict = remove_prefix(snake_case__ )
A : Union[str, Any] = decompress_data(snake_case__ )
write_file_binary(snake_case__ , snake_case__ )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
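# A compact reference version of the decompress_data logic above, reconstructed
# from context (a sketch for readability, not a verified drop-in replacement):
import math

def decompress_bits(data_bits: str) -> str:
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)
    for bit in data_bits:
        curr_string += bit
        if curr_string not in lexicon:
            continue
        last_match = lexicon[curr_string]
        result += last_match
        lexicon[curr_string] = last_match + "0"
        # Once index reaches a power of two, every existing key gains a
        # leading 0 so code lengths grow consistently.
        if math.log2(index).is_integer():
            lexicon = {"0" + key: value for key, value in lexicon.items()}
        lexicon[bin(index)[2:]] = last_match + "1"
        index += 1
        curr_string = ""
    return result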
| 343
| 0
|
"""simple docstring"""
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)
def dutch_national_flag_sort( sequence : list ):
    '''Sort a sequence of 0s, 1s and 2s in a single pass (Dutch national flag problem).'''
    if not sequence:
        return []
    if len(sequence ) == 1:
        return list(sequence )
    low = 0
    high = len(sequence ) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f'The elements inside the sequence must contain only {colors} values'
            raise ValueError(msg )
    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
__A : str = input('''Enter numbers separated by commas:\n''').strip()
__A : List[Any] = [int(item.strip()) for item in user_input.split(''',''')]
print(F'''{dutch_national_flag_sort(unsorted)}''')
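# Example: dutch_national_flag_sort([2, 0, 1, 2, 0]) -> [0, 0, 1, 2, 2].
# One pass: 0s are swapped toward the front, 2s toward the back, while the
# middle pointer walks over the 1s.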
| 499
|
"""simple docstring"""
import math
def insertion_sort( array : list , start : int = 0 , end : int = 0 ):
    '''Sort array[start:end] in place with insertion sort.'''
    end = end or len(array )
    for i in range(start , end ):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array
def heapify( array : list , index : int , heap_size : int ):  # Max Heap
    '''Sift array[index] down to restore the max-heap property.'''
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array , largest , heap_size )
def heap_sort( array : list ):
    '''Heap sort, used as the fallback when quicksort recursion gets too deep.'''
    n = len(array )
    for i in range(n // 2 , -1 , -1 ):
        heapify(array , i , n )
    for i in range(n - 1 , 0 , -1 ):
        array[0], array[i] = array[i], array[0]
        heapify(array , 0 , i )
    return array
def median_of_3( array : list , first_index : int , middle_index : int , last_index : int ):
    '''Return the median of three candidate pivot values.'''
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]
def partition( array : list , low : int , high : int , pivot : int ):
    '''Hoare-style partition around the pivot value; returns the split index.'''
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1
def sort( array : list ):
    '''Introsort entry point: quicksort that falls back to heap/insertion sort.'''
    if len(array ) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array ) ) )
    size_threshold = 16
    return intro_sort(array , 0 , len(array ) , size_threshold , max_depth )
def intro_sort( array : list , start : int , end : int , size_threshold : int , max_depth : int ):
    '''Recursive introsort core.'''
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array )
        max_depth -= 1
        pivot = median_of_3(array , start , start + ((end - start) // 2) + 1 , end - 1 )
        p = partition(array , start , end , pivot )
        intro_sort(array , p , end , size_threshold , max_depth )
        end = p
    return insertion_sort(array , start , end )
if __name__ == "__main__":
import doctest
doctest.testmod()
__A : Optional[int] = input('''Enter numbers separated by a comma : ''').strip()
__A : List[Any] = [float(item) for item in user_input.split(''',''')]
print(sort(unsorted))
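# sort() dispatches between the three strategies: quicksort with a
# median-of-3 pivot, heap_sort once max_depth reaches 0, and insertion_sort
# for slices of at most 16 elements. For example,
# sort([4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12]) returns
# the list in ascending order.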
| 499
| 1
|
def price_plus_tax( price : float , tax_rate : float ) -> float:
    return price * (1 + tax_rate)
if __name__ == "__main__":
print(f'''{price_plus_tax(100, 0.25) = }''')
print(f'''{price_plus_tax(125.50, 0.05) = }''')
| 383
|
from ...configuration_utils import PretrainedConfig
UpperCamelCase = {
"google/tapas-base-finetuned-sqa": (
"https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"
),
"google/tapas-base-finetuned-wtq": (
"https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"
),
"google/tapas-base-finetuned-wikisql-supervised": (
"https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"
),
"google/tapas-base-finetuned-tabfact": (
"https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"
),
}
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
_snake_case : Dict = """tapas"""
def __init__( self :List[Any] , lowerCamelCase__ :List[str]=3_05_22 , lowerCamelCase__ :str=7_68 , lowerCamelCase__ :List[Any]=12 , lowerCamelCase__ :Any=12 , lowerCamelCase__ :Tuple=30_72 , lowerCamelCase__ :int="gelu" , lowerCamelCase__ :Dict=0.1 , lowerCamelCase__ :str=0.1 , lowerCamelCase__ :List[str]=10_24 , lowerCamelCase__ :List[Any]=[3, 2_56, 2_56, 2, 2_56, 2_56, 10] , lowerCamelCase__ :Tuple=0.02 , lowerCamelCase__ :str=1e-12 , lowerCamelCase__ :str=0 , lowerCamelCase__ :Optional[int]=10.0 , lowerCamelCase__ :int=0 , lowerCamelCase__ :Dict=1.0 , lowerCamelCase__ :Union[str, Any]=None , lowerCamelCase__ :Optional[int]=1.0 , lowerCamelCase__ :List[Any]=False , lowerCamelCase__ :Any=None , lowerCamelCase__ :Optional[int]=1.0 , lowerCamelCase__ :Union[str, Any]=1.0 , lowerCamelCase__ :Optional[int]=False , lowerCamelCase__ :List[Any]=False , lowerCamelCase__ :Any="ratio" , lowerCamelCase__ :int=None , lowerCamelCase__ :Union[str, Any]=None , lowerCamelCase__ :int=64 , lowerCamelCase__ :int=32 , lowerCamelCase__ :List[str]=False , lowerCamelCase__ :Optional[int]=True , lowerCamelCase__ :str=False , lowerCamelCase__ :int=False , lowerCamelCase__ :Optional[Any]=True , lowerCamelCase__ :Optional[Any]=False , lowerCamelCase__ :str=None , lowerCamelCase__ :List[Any]=None , **lowerCamelCase__ :Optional[Any] , ):
super().__init__(pad_token_id=lowerCamelCase__ , **lowerCamelCase__ )
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
UpperCamelCase__ :List[Any] = vocab_size
UpperCamelCase__ :Optional[int] = hidden_size
UpperCamelCase__ :Any = num_hidden_layers
UpperCamelCase__ :str = num_attention_heads
UpperCamelCase__ :Dict = hidden_act
UpperCamelCase__ :Tuple = intermediate_size
UpperCamelCase__ :int = hidden_dropout_prob
UpperCamelCase__ :List[str] = attention_probs_dropout_prob
UpperCamelCase__ :Any = max_position_embeddings
UpperCamelCase__ :List[Any] = type_vocab_sizes
UpperCamelCase__ :List[Any] = initializer_range
UpperCamelCase__ :List[str] = layer_norm_eps
# Fine-tuning task hyperparameters
UpperCamelCase__ :List[str] = positive_label_weight
UpperCamelCase__ :int = num_aggregation_labels
UpperCamelCase__ :str = aggregation_loss_weight
UpperCamelCase__ :Optional[Any] = use_answer_as_supervision
UpperCamelCase__ :Tuple = answer_loss_importance
UpperCamelCase__ :Dict = use_normalized_answer_loss
UpperCamelCase__ :Optional[Any] = huber_loss_delta
UpperCamelCase__ :Any = temperature
UpperCamelCase__ :Union[str, Any] = aggregation_temperature
UpperCamelCase__ :Tuple = use_gumbel_for_cells
UpperCamelCase__ :Tuple = use_gumbel_for_aggregation
UpperCamelCase__ :Optional[int] = average_approximation_function
UpperCamelCase__ :Optional[Any] = cell_selection_preference
UpperCamelCase__ :Any = answer_loss_cutoff
UpperCamelCase__ :Dict = max_num_rows
UpperCamelCase__ :Optional[int] = max_num_columns
UpperCamelCase__ :Tuple = average_logits_per_cell
UpperCamelCase__ :Any = select_one_column
UpperCamelCase__ :Dict = allow_empty_column_selection
UpperCamelCase__ :Union[str, Any] = init_cell_selection_weights_to_zero
UpperCamelCase__ :Optional[Any] = reset_position_index_per_cell
UpperCamelCase__ :List[str] = disable_per_token_loss
# Aggregation hyperparameters
UpperCamelCase__ :Tuple = aggregation_labels
UpperCamelCase__ :str = no_aggregation_label_index
if isinstance(self.aggregation_labels , lowerCamelCase__ ):
UpperCamelCase__ :Optional[Any] = {int(lowerCamelCase__ ): v for k, v in aggregation_labels.items()}
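# Each `UpperCamelCase__ :X = ...` line in the __init__ above stands for an
# attribute assignment (`self.vocab_size = vocab_size`, and so on). A minimal
# sketch of the same configuration pattern -- names here are hypothetical, not
# the real TAPAS signature:
class MiniConfig:
    model_type = "mini"

    def __init__(self, vocab_size=30522, hidden_size=768, **kwargs):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        # A real PretrainedConfig forwards unknown kwargs to its base class;
        # here we simply attach them to the instance.
        for key, value in kwargs.items():
            setattr(self, key, value)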
| 383
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__lowerCAmelCase = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
"VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
"VanForImageClassification",
"VanModel",
"VanPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 585
|
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
__UpperCAmelCase : Optional[Any] = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : List[Any] = ["GPTNeoXTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : Union[str, Any] = [
"GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoXForCausalLM",
"GPTNeoXForQuestionAnswering",
"GPTNeoXForSequenceClassification",
"GPTNeoXForTokenClassification",
"GPTNeoXLayer",
"GPTNeoXModel",
"GPTNeoXPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
__UpperCAmelCase : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 241
| 0
|
def solution( n = 100 ):
    """Return the difference between the square of the sum and the sum of the squares of the first n natural numbers."""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(F"""{solution() = }""")
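# Project Euler 6: for n = 100 the closed forms give
#   sum of squares = 100 * 101 * 201 / 6  = 338350
#   square of sum  = (100 * 101 / 2) ** 2 = 25502500
# so solution() == 25502500 - 338350 == 25164150.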
| 143
|
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class __lowercase ( a__ ):
_lowerCAmelCase = ""
_lowerCAmelCase = "hf-legacy" # "hf://"" is reserved for hffs
def __init__( self : int , lowercase__ : Optional[DatasetInfo] = None , lowercase__ : Optional[str] = None , **lowercase__ : List[Any] , ):
super().__init__(self , **lowercase__ )
a_ = repo_info
a_ = token
a_ = None
def __magic_name__ ( self : List[str] ):
if self.dir_cache is None:
a_ = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
a_ = {
'''name''': hf_file.rfilename,
'''size''': None,
'''type''': '''file''',
}
self.dir_cache.update(
{
str(lowercase__ ): {'''name''': str(lowercase__ ), '''size''': None, '''type''': '''directory'''}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def __magic_name__ ( self : Dict , lowercase__ : str , lowercase__ : str = "rb" , **lowercase__ : Dict , ):
if not isinstance(self.repo_info , lowercase__ ):
raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}" )
a_ = hf_hub_url(self.repo_info.id , lowercase__ , revision=self.repo_info.sha )
return fsspec.open(
lowercase__ , mode=lowercase__ , headers=get_authentication_headers_for_url(lowercase__ , use_auth_token=self.token ) , client_kwargs={'''trust_env''': True} , ).open()
def __magic_name__ ( self : Optional[Any] , lowercase__ : Dict , **lowercase__ : Optional[int] ):
self._get_dirs()
a_ = self._strip_protocol(lowercase__ )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(lowercase__ )
def __magic_name__ ( self : Dict , lowercase__ : Dict , lowercase__ : List[Any]=False , **lowercase__ : Tuple ):
self._get_dirs()
a_ = PurePosixPath(path.strip('''/''' ) )
a_ = {}
for p, f in self.dir_cache.items():
a_ = PurePosixPath(p.strip('''/''' ) )
a_ = p.parent
if root == path:
a_ = f
a_ = list(paths.values() )
if detail:
return out
else:
return sorted(f['''name'''] for f in out )
| 143
| 1
|
def min_path_sum( grid ) -> int:
    if not grid or not grid[0]:
        raise TypeError('The grid does not contain the appropriate information' )
    for cell_n in range(1 , len(grid[0] ) ):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]
    for row_n in range(1 , len(grid ) ):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row , row_above )
        row_above = grid[row_n]
    return grid[-1][-1]
def fill_row( current_row , row_above ) -> list:
    current_row[0] += row_above[0]
    for cell_n in range(1 , len(current_row ) ):
        current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] )
    return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
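# The grid is overwritten with cumulative minima row by row, so only the
# previous row is ever needed. For example,
# min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7 (path 1-3-1-1-1).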
| 100
|
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def __snake_case ( lowerCAmelCase_ ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = []
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''',
f'''stage{idx}.patch_embed.proj.weight''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''',
f'''stage{idx}.patch_embed.proj.bias''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''',
f'''stage{idx}.patch_embed.norm.weight''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''',
f'''stage{idx}.patch_embed.norm.bias''',
) )
return embed
def __snake_case ( lowerCAmelCase_ , lowerCAmelCase_ ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = []
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj.bias''',
) )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', f'''stage{idx}.blocks.{cnt}.norm1.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', f'''stage{idx}.blocks.{cnt}.norm1.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', f'''stage{idx}.blocks.{cnt}.norm2.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', f'''stage{idx}.blocks.{cnt}.norm2.bias''') )
return attention_weights
def __snake_case ( lowerCAmelCase_ ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = []
token.append((f'''cvt.encoder.stages.{idx}.cls_token''', '''stage2.cls_token''') )
return token
def __snake_case ( ) -> List[str]:
SCREAMING_SNAKE_CASE__ = []
head.append(('''layernorm.weight''', '''norm.weight''') )
head.append(('''layernorm.bias''', '''norm.bias''') )
head.append(('''classifier.weight''', '''head.weight''') )
head.append(('''classifier.bias''', '''head.bias''') )
return head
def __snake_case ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = '''imagenet-1k-id2label.json'''
SCREAMING_SNAKE_CASE__ = 1_0_0_0
SCREAMING_SNAKE_CASE__ = '''huggingface/label-files'''
SCREAMING_SNAKE_CASE__ = num_labels
SCREAMING_SNAKE_CASE__ = json.load(open(cached_download(hf_hub_url(lowerCAmelCase_ , lowerCAmelCase_ , repo_type='''dataset''' ) ) , '''r''' ) )
SCREAMING_SNAKE_CASE__ = {int(lowerCAmelCase_ ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ = idalabel
SCREAMING_SNAKE_CASE__ = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ = SCREAMING_SNAKE_CASE__ = CvtConfig(num_labels=lowerCAmelCase_ , idalabel=lowerCAmelCase_ , labelaid=lowerCAmelCase_ )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "13":
SCREAMING_SNAKE_CASE__ = [1, 2, 1_0]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "21":
SCREAMING_SNAKE_CASE__ = [1, 4, 1_6]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
else:
SCREAMING_SNAKE_CASE__ = [2, 2, 2_0]
SCREAMING_SNAKE_CASE__ = [3, 1_2, 1_6]
SCREAMING_SNAKE_CASE__ = [1_9_2, 7_6_8, 1_0_2_4]
SCREAMING_SNAKE_CASE__ = CvtForImageClassification(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' )
SCREAMING_SNAKE_CASE__ = image_size
SCREAMING_SNAKE_CASE__ = torch.load(lowerCAmelCase_ , map_location=torch.device('''cpu''' ) )
SCREAMING_SNAKE_CASE__ = OrderedDict()
SCREAMING_SNAKE_CASE__ = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
SCREAMING_SNAKE_CASE__ = list_of_state_dict + cls_token(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ = list_of_state_dict + embeddings(lowerCAmelCase_ )
for cnt in range(config.depth[idx] ):
SCREAMING_SNAKE_CASE__ = list_of_state_dict + attention(lowerCAmelCase_ , lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ = list_of_state_dict + final()
for gg in list_of_state_dict:
print(lowerCAmelCase_ )
for i in range(len(lowerCAmelCase_ ) ):
SCREAMING_SNAKE_CASE__ = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(lowerCAmelCase_ )
model.save_pretrained(lowerCAmelCase_ )
image_processor.save_pretrained(lowerCAmelCase_ )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
_A : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
"""--cvt_model""",
default="""cvt-w24""",
type=str,
help="""Name of the cvt model you'd like to convert.""",
)
parser.add_argument(
"""--image_size""",
default=3_84,
type=int,
help="""Input Image Size""",
)
parser.add_argument(
"""--cvt_file_name""",
default=r"""cvtmodels\CvT-w24-384x384-IN-22k.pth""",
type=str,
help="""Input Image Size""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
_A : int = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
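# The converter above builds (new_key, old_key) pairs and copies tensors
# across by name. A minimal sketch of that renaming step (the keys involved
# are hypothetical, not the actual CvT checkpoint layout):
from collections import OrderedDict

def rename_state_dict(old_state, rename_pairs):
    """Return a copy of old_state with keys renamed via (new_key, old_key) pairs."""
    new_state = OrderedDict()
    for new_key, old_key in rename_pairs:
        new_state[new_key] = old_state[old_key]
    return new_state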
| 100
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_A = {
'configuration_layoutlmv2': ['LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LayoutLMv2Config'],
'processing_layoutlmv2': ['LayoutLMv2Processor'],
'tokenization_layoutlmv2': ['LayoutLMv2Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ['LayoutLMv2TokenizerFast']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ['LayoutLMv2FeatureExtractor']
_A = ['LayoutLMv2ImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
'LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv2ForQuestionAnswering',
'LayoutLMv2ForSequenceClassification',
'LayoutLMv2ForTokenClassification',
'LayoutLMv2Layer',
'LayoutLMv2Model',
'LayoutLMv2PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaLayer,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
else:
import sys
_A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 538
|
"""simple docstring"""
import collections
import os
import re
from pathlib import Path
_A = 'src/transformers'
# Matches is_xxx_available()
_A = re.compile(R'is\_([a-z_]*)_available()')
# Catches a one-line _import_struct = {xxx}
_A = re.compile(R'^_import_structure\s+=\s+\{([^\}]+)\}')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_A = re.compile(R'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_A = re.compile(R'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)')
# Catches a line _import_struct["bla"].append("foo")
_A = re.compile(R'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_A = re.compile(R'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]')
# Catches a line with an object between quotes and a comma: "MyModel",
_A = re.compile(R'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_A = re.compile(R'^\s+\[([^\]]+)\]')
# Catches a line with from foo import bar, bla, boo
_A = re.compile(R'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
# Catches a line with try:
_A = re.compile(R'^\s*try:')
# Catches a line with else:
_A = re.compile(R'^\s*else:')
def SCREAMING_SNAKE_CASE ( __UpperCAmelCase ) -> List[str]:
if _re_test_backend.search(__UpperCAmelCase ) is None:
return None
SCREAMING_SNAKE_CASE__ = [b[0] for b in _re_backend.findall(__UpperCAmelCase )]
backends.sort()
return "_and_".join(__UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( __UpperCAmelCase ) -> Optional[Any]:
with open(__UpperCAmelCase , "r" , encoding="utf-8" , newline="\n" ) as f:
SCREAMING_SNAKE_CASE__ = f.readlines()
SCREAMING_SNAKE_CASE__ = 0
while line_index < len(__UpperCAmelCase ) and not lines[line_index].startswith("_import_structure = {" ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(__UpperCAmelCase ):
return None
# First grab the objects without a specific backend in _import_structure
SCREAMING_SNAKE_CASE__ = []
while not lines[line_index].startswith("if TYPE_CHECKING" ) and find_backend(lines[line_index] ) is None:
SCREAMING_SNAKE_CASE__ = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(__UpperCAmelCase ):
SCREAMING_SNAKE_CASE__ = _re_one_line_import_struct.search(__UpperCAmelCase ).groups()[0]
SCREAMING_SNAKE_CASE__ = re.findall(R"\[([^\]]+)\]" , __UpperCAmelCase )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(", " )] )
line_index += 1
continue
SCREAMING_SNAKE_CASE__ = _re_import_struct_key_value.search(__UpperCAmelCase )
if single_line_import_search is not None:
SCREAMING_SNAKE_CASE__ = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", " ) if len(__UpperCAmelCase ) > 0]
objects.extend(__UpperCAmelCase )
elif line.startswith(" " * 8 + "\"" ):
objects.append(line[9:-3] )
line_index += 1
SCREAMING_SNAKE_CASE__ = {"none": objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith("if TYPE_CHECKING" ):
# If the line is an if not is_backend_available, we grab all objects associated.
SCREAMING_SNAKE_CASE__ = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
SCREAMING_SNAKE_CASE__ = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
SCREAMING_SNAKE_CASE__ = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 4 ):
SCREAMING_SNAKE_CASE__ = lines[line_index]
if _re_import_struct_add_one.search(__UpperCAmelCase ) is not None:
objects.append(_re_import_struct_add_one.search(__UpperCAmelCase ).groups()[0] )
elif _re_import_struct_add_many.search(__UpperCAmelCase ) is not None:
SCREAMING_SNAKE_CASE__ = _re_import_struct_add_many.search(__UpperCAmelCase ).groups()[0].split(", " )
SCREAMING_SNAKE_CASE__ = [obj[1:-1] for obj in imports if len(__UpperCAmelCase ) > 0]
objects.extend(__UpperCAmelCase )
elif _re_between_brackets.search(__UpperCAmelCase ) is not None:
SCREAMING_SNAKE_CASE__ = _re_between_brackets.search(__UpperCAmelCase ).groups()[0].split(", " )
SCREAMING_SNAKE_CASE__ = [obj[1:-1] for obj in imports if len(__UpperCAmelCase ) > 0]
objects.extend(__UpperCAmelCase )
elif _re_quote_object.search(__UpperCAmelCase ) is not None:
objects.append(_re_quote_object.search(__UpperCAmelCase ).groups()[0] )
elif line.startswith(" " * 8 + "\"" ):
objects.append(line[9:-3] )
elif line.startswith(" " * 12 + "\"" ):
objects.append(line[13:-3] )
line_index += 1
SCREAMING_SNAKE_CASE__ = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
SCREAMING_SNAKE_CASE__ = []
while (
line_index < len(__UpperCAmelCase )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith("else" )
):
SCREAMING_SNAKE_CASE__ = lines[line_index]
SCREAMING_SNAKE_CASE__ = _re_import.search(__UpperCAmelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 8 ):
objects.append(line[8:-2] )
line_index += 1
SCREAMING_SNAKE_CASE__ = {"none": objects}
# Let's continue with backend-specific objects
while line_index < len(__UpperCAmelCase ):
# If the line is an if is_backend_available, we grab all objects associated.
SCREAMING_SNAKE_CASE__ = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
SCREAMING_SNAKE_CASE__ = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
SCREAMING_SNAKE_CASE__ = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 8 ):
SCREAMING_SNAKE_CASE__ = lines[line_index]
SCREAMING_SNAKE_CASE__ = _re_import.search(__UpperCAmelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 12 ):
objects.append(line[12:-2] )
line_index += 1
SCREAMING_SNAKE_CASE__ = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ) -> Any:
def find_duplicates(__UpperCAmelCase ):
return [k for k, v in collections.Counter(__UpperCAmelCase ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
SCREAMING_SNAKE_CASE__ = []
for key in import_dict_objects.keys():
SCREAMING_SNAKE_CASE__ = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F"""Duplicate _import_structure definitions for: {duplicate_imports}""" )
SCREAMING_SNAKE_CASE__ = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
SCREAMING_SNAKE_CASE__ = "base imports" if key == "none" else F"""{key} backend"""
errors.append(F"""Differences for {name}:""" )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F""" {a} in TYPE_HINT but not in _import_structure.""" )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F""" {a} in _import_structure but not in TYPE_HINT.""" )
return errors
def SCREAMING_SNAKE_CASE ( ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = []
for root, _, files in os.walk(__UpperCAmelCase ):
if "__init__.py" in files:
SCREAMING_SNAKE_CASE__ = os.path.join(__UpperCAmelCase , "__init__.py" )
SCREAMING_SNAKE_CASE__ = parse_init(__UpperCAmelCase )
if objects is not None:
SCREAMING_SNAKE_CASE__ = analyze_results(*__UpperCAmelCase )
if len(__UpperCAmelCase ) > 0:
SCREAMING_SNAKE_CASE__ = F"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"""
failures.append("\n".join(__UpperCAmelCase ) )
if len(__UpperCAmelCase ) > 0:
raise ValueError("\n\n".join(__UpperCAmelCase ) )
def SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = []
for path, directories, files in os.walk(__UpperCAmelCase ):
for folder in directories:
# Ignore private modules
if folder.startswith("_" ):
directories.remove(__UpperCAmelCase )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(__UpperCAmelCase ) / folder).glob("*.py" ) ) ) == 0:
continue
SCREAMING_SNAKE_CASE__ = str((Path(__UpperCAmelCase ) / folder).relative_to(__UpperCAmelCase ) )
SCREAMING_SNAKE_CASE__ = short_path.replace(os.path.sep , "." )
submodules.append(__UpperCAmelCase )
for fname in files:
if fname == "__init__.py":
continue
SCREAMING_SNAKE_CASE__ = str((Path(__UpperCAmelCase ) / fname).relative_to(__UpperCAmelCase ) )
SCREAMING_SNAKE_CASE__ = short_path.replace(".py" , "" ).replace(os.path.sep , "." )
if len(submodule.split("." ) ) == 1:
submodules.append(__UpperCAmelCase )
return submodules
_A = [
'convert_pytorch_checkpoint_to_tf2',
'modeling_flax_pytorch_utils',
'models.esm.openfold_utils',
]
def SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
# This is to make sure the transformers module imported is the one in the repo.
from transformers.utils import direct_transformers_import
SCREAMING_SNAKE_CASE__ = direct_transformers_import(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = set(transformers._import_structure.keys() )
# This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
# some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
with open(os.path.join(__UpperCAmelCase , "__init__.py" ) , "r" ) as f:
SCREAMING_SNAKE_CASE__ = f.read()
import_structure_keys.update(set(re.findall(R"import_structure\[\"([^\"]*)\"\]" , __UpperCAmelCase ) ) )
SCREAMING_SNAKE_CASE__ = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in import_structure_keys
]
if len(__UpperCAmelCase ) > 0:
SCREAMING_SNAKE_CASE__ = "\n".join(F"""- {module}""" for module in module_not_registered )
raise ValueError(
"The following submodules are not properly registed in the main init of Transformers:\n"
F"""{list_of_modules}\n"""
"Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value." )
if __name__ == "__main__":
check_all_inits()
check_submodules()
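# analyze_results compares the two halves of a lazy __init__. It flags, for
# example, "FooModel" listed under _import_structure for the torch backend
# but missing from the TYPE_CHECKING branch (or vice versa), plus duplicate
# registrations within either half.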
| 538
| 1
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __magic_name__ ( metaclass=_UpperCamelCase ):
UpperCamelCase__ = ["""sentencepiece"""]
def __init__( self , *snake_case_ , **snake_case_ ):
requires_backends(self , ['''sentencepiece'''] )
class __magic_name__ ( metaclass=_UpperCamelCase ):
UpperCamelCase__ = ["""sentencepiece"""]
def __init__( self , *snake_case_ , **snake_case_ ):
requires_backends(self , ['''sentencepiece'''] )
class __magic_name__ ( metaclass=_UpperCamelCase ):
UpperCamelCase__ = ["""sentencepiece"""]
def __init__( self , *snake_case_ , **snake_case_ ):
requires_backends(self , ['''sentencepiece'''] )
class __magic_name__ ( metaclass=_UpperCamelCase ):
UpperCamelCase__ = ["""sentencepiece"""]
def __init__( self , *snake_case_ , **snake_case_ ):
requires_backends(self , ['''sentencepiece'''] )
class __magic_name__ ( metaclass=_UpperCamelCase ):
UpperCamelCase__ = ["""sentencepiece"""]
def __init__( self , *snake_case_ , **snake_case_ ):
requires_backends(self , ['''sentencepiece'''] )
class __magic_name__ ( metaclass=_UpperCamelCase ):
UpperCamelCase__ = ["""sentencepiece"""]
def __init__( self , *snake_case_ , **snake_case_ ):
requires_backends(self , ['''sentencepiece'''] )
class __magic_name__ ( metaclass=_UpperCamelCase ):
UpperCamelCase__ = ["""sentencepiece"""]
def __init__( self , *snake_case_ , **snake_case_ ):
requires_backends(self , ['''sentencepiece'''] )
class __magic_name__ ( metaclass=_UpperCamelCase ):
UpperCamelCase__ = ["""sentencepiece"""]
def __init__( self , *snake_case_ , **snake_case_ ):
requires_backends(self , ['''sentencepiece'''] )
class __magic_name__ ( metaclass=_UpperCamelCase ):
UpperCamelCase__ = ["""sentencepiece"""]
def __init__( self , *snake_case_ , **snake_case_ ):
requires_backends(self , ['''sentencepiece'''] )
class __magic_name__ ( metaclass=_UpperCamelCase ):
UpperCamelCase__ = ["""sentencepiece"""]
def __init__( self , *snake_case_ , **snake_case_ ):
requires_backends(self , ['''sentencepiece'''] )
class __magic_name__ ( metaclass=_UpperCamelCase ):
UpperCamelCase__ = ["""sentencepiece"""]
def __init__( self , *snake_case_ , **snake_case_ ):
requires_backends(self , ['''sentencepiece'''] )
class __magic_name__ ( metaclass=_UpperCamelCase ):
UpperCamelCase__ = ["""sentencepiece"""]
def __init__( self , *snake_case_ , **snake_case_ ):
requires_backends(self , ['''sentencepiece'''] )
class __magic_name__ ( metaclass=_UpperCamelCase ):
UpperCamelCase__ = ["""sentencepiece"""]
def __init__( self , *snake_case_ , **snake_case_ ):
requires_backends(self , ['''sentencepiece'''] )
class __magic_name__ ( metaclass=_UpperCamelCase ):
UpperCamelCase__ = ["""sentencepiece"""]
def __init__( self , *snake_case_ , **snake_case_ ):
requires_backends(self , ['''sentencepiece'''] )
class __magic_name__ ( metaclass=_UpperCamelCase ):
UpperCamelCase__ = ["""sentencepiece"""]
def __init__( self , *snake_case_ , **snake_case_ ):
requires_backends(self , ['''sentencepiece'''] )
class __magic_name__ ( metaclass=_UpperCamelCase ):
UpperCamelCase__ = ["""sentencepiece"""]
def __init__( self , *snake_case_ , **snake_case_ ):
requires_backends(self , ['''sentencepiece'''] )
class __magic_name__ ( metaclass=_UpperCamelCase ):
UpperCamelCase__ = ["""sentencepiece"""]
def __init__( self , *snake_case_ , **snake_case_ ):
requires_backends(self , ['''sentencepiece'''] )
class __magic_name__ ( metaclass=_UpperCamelCase ):
UpperCamelCase__ = ["""sentencepiece"""]
def __init__( self , *snake_case_ , **snake_case_ ):
requires_backends(self , ['''sentencepiece'''] )
class __magic_name__ ( metaclass=_UpperCamelCase ):
UpperCamelCase__ = ["""sentencepiece"""]
def __init__( self , *snake_case_ , **snake_case_ ):
requires_backends(self , ['''sentencepiece'''] )
class __magic_name__ ( metaclass=_UpperCamelCase ):
UpperCamelCase__ = ["""sentencepiece"""]
def __init__( self , *snake_case_ , **snake_case_ ):
requires_backends(self , ['''sentencepiece'''] )
class __magic_name__ ( metaclass=_UpperCamelCase ):
UpperCamelCase__ = ["""sentencepiece"""]
def __init__( self , *snake_case_ , **snake_case_ ):
requires_backends(self , ['''sentencepiece'''] )
class __magic_name__ ( metaclass=_UpperCamelCase ):
UpperCamelCase__ = ["""sentencepiece"""]
def __init__( self , *snake_case_ , **snake_case_ ):
requires_backends(self , ['''sentencepiece'''] )
class __magic_name__ ( metaclass=_UpperCamelCase ):
UpperCamelCase__ = ["""sentencepiece"""]
def __init__( self , *snake_case_ , **snake_case_ ):
requires_backends(self , ['''sentencepiece'''] )
class __magic_name__ ( metaclass=_UpperCamelCase ):
UpperCamelCase__ = ["""sentencepiece"""]
def __init__( self , *snake_case_ , **snake_case_ ):
requires_backends(self , ['''sentencepiece'''] )
class __magic_name__ ( metaclass=_UpperCamelCase ):
UpperCamelCase__ = ["""sentencepiece"""]
def __init__( self , *snake_case_ , **snake_case_ ):
requires_backends(self , ['''sentencepiece'''] )
class __magic_name__ ( metaclass=_UpperCamelCase ):
UpperCamelCase__ = ["""sentencepiece"""]
def __init__( self , *snake_case_ , **snake_case_ ):
requires_backends(self , ['''sentencepiece'''] )
class __magic_name__ ( metaclass=_UpperCamelCase ):
UpperCamelCase__ = ["""sentencepiece"""]
def __init__( self , *snake_case_ , **snake_case_ ):
requires_backends(self , ['''sentencepiece'''] )
class __magic_name__ ( metaclass=_UpperCamelCase ):
UpperCamelCase__ = ["""sentencepiece"""]
def __init__( self , *snake_case_ , **snake_case_ ):
requires_backends(self , ['''sentencepiece'''] )
class __magic_name__ ( metaclass=_UpperCamelCase ):
UpperCamelCase__ = ["""sentencepiece"""]
def __init__( self , *snake_case_ , **snake_case_ ):
requires_backends(self , ['''sentencepiece'''] )
class __magic_name__ ( metaclass=_UpperCamelCase ):
UpperCamelCase__ = ["""sentencepiece"""]
def __init__( self , *snake_case_ , **snake_case_ ):
requires_backends(self , ['''sentencepiece'''] )
class __magic_name__ ( metaclass=_UpperCamelCase ):
UpperCamelCase__ = ["""sentencepiece"""]
def __init__( self , *snake_case_ , **snake_case_ ):
requires_backends(self , ['''sentencepiece'''] )
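# Each class above is a "dummy object": importing it always succeeds, but any
# instantiation raises a readable error when sentencepiece is absent. A
# simplified sketch of the check -- the real requires_backends helper in
# transformers does more (version checks, tailored messages):
import importlib.util

def requires_backends_sketch(obj, backends):
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        name = obj.__name__ if isinstance(obj, type) else type(obj).__name__
        raise ImportError(f"{name} requires the missing backend(s): {', '.join(missing)}")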
| 72
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ : Tuple = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : int = {
"""google/pix2struct-textcaps-base""": (
"""https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"""
),
}
class __lowerCAmelCase ( _UpperCamelCase ):
_UpperCamelCase : Optional[int] = """pix2struct_text_model"""
_UpperCamelCase : Dict = ["""past_key_values"""]
_UpperCamelCase : Union[str, Any] = {
"""hidden_size""": """hidden_size""",
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self , snake_case=50_244 , snake_case=768 , snake_case=64 , snake_case=2_048 , snake_case=12 , snake_case=12 , snake_case=32 , snake_case=128 , snake_case=0.1 , snake_case=1E-6 , snake_case=1.0 , snake_case="gelu_new" , snake_case=0 , snake_case=False , snake_case=0 , snake_case=1 , snake_case=False , snake_case=True , **snake_case , ) -> Dict:
"""simple docstring"""
a__ : Tuple = vocab_size
a__ : List[str] = hidden_size
a__ : Optional[Any] = d_kv
a__ : List[str] = d_ff
a__ : Optional[int] = num_layers
a__ : Tuple = num_heads
a__ : List[Any] = relative_attention_num_buckets
a__ : Optional[int] = relative_attention_max_distance
a__ : Optional[int] = dropout_rate
a__ : List[Any] = layer_norm_epsilon
a__ : List[str] = initializer_factor
a__ : Any = use_cache
a__ : Any = eos_token_id
a__ : List[Any] = decoder_start_token_id
# for backwards compatibility
a__ : Tuple = dense_act_fn
super().__init__(
pad_token_id=snake_case , eos_token_id=snake_case , decoder_start_token_id=snake_case , tie_word_embeddings=snake_case , is_decoder=snake_case , **snake_case , )
@classmethod
def _snake_case ( cls , snake_case , **snake_case ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(snake_case )
a__ , a__ : Optional[Any] = cls.get_config_dict(snake_case , **snake_case )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get("model_type" ) == "pix2struct":
a__ : Optional[int] = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(snake_case , **snake_case )
class __lowerCAmelCase ( _UpperCamelCase ):
_UpperCamelCase : int = """pix2struct_vision_model"""
def __init__( self , snake_case=768 , snake_case=768 , snake_case=2_048 , snake_case=64 , snake_case=12 , snake_case=12 , snake_case="gelu_new" , snake_case=1E-6 , snake_case=0.0 , snake_case=0.0 , snake_case=1E-10 , snake_case=1.0 , snake_case=4_096 , snake_case=32 , snake_case=128 , **snake_case , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**snake_case )
a__ : Optional[Any] = hidden_size
a__ : Tuple = patch_embed_hidden_size
a__ : Tuple = d_ff
a__ : List[Any] = dropout_rate
a__ : Optional[Any] = num_hidden_layers
a__ : Optional[Any] = num_attention_heads
a__ : List[str] = initializer_range
a__ : List[str] = initializer_factor
a__ : List[Any] = attention_dropout
a__ : Optional[int] = layer_norm_eps
a__ : int = dense_act_fn
a__ : Optional[Any] = seq_len
a__ : List[Any] = relative_attention_num_buckets
a__ : int = relative_attention_max_distance
a__ : int = d_kv
@classmethod
def _snake_case ( cls , snake_case , **snake_case ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(snake_case )
a__ , a__ : int = cls.get_config_dict(snake_case , **snake_case )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get("model_type" ) == "pix2struct":
a__ : int = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(snake_case , **snake_case )
class __lowerCAmelCase ( _UpperCamelCase ):
_UpperCamelCase : List[Any] = """pix2struct"""
_UpperCamelCase : Dict = True
def __init__( self , snake_case=None , snake_case=None , snake_case=1.0 , snake_case=0.02 , snake_case=False , snake_case=False , snake_case=True , **snake_case , ) -> Optional[int]:
"""simple docstring"""
super().__init__(tie_word_embeddings=snake_case , is_encoder_decoder=snake_case , **snake_case )
if text_config is None:
a__ : Optional[Any] = {}
logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values." )
if vision_config is None:
a__ : List[Any] = {}
logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values." )
a__ : List[str] = PixaStructTextConfig(**snake_case )
a__ : List[str] = PixaStructVisionConfig(**snake_case )
a__ : Tuple = self.text_config.decoder_start_token_id
a__ : int = self.text_config.pad_token_id
a__ : List[Any] = self.text_config.eos_token_id
a__ : str = initializer_factor
a__ : Dict = initializer_range
a__ : Optional[int] = self.initializer_range
a__ : Union[str, Any] = self.initializer_range
a__ : Union[str, Any] = is_vqa
@classmethod
def _snake_case ( cls , snake_case , snake_case , **snake_case ) -> Dict:
"""simple docstring"""
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **snake_case )
def _snake_case ( self ) -> int:
"""simple docstring"""
a__ : Tuple = copy.deepcopy(self.__dict__ )
a__ : List[str] = self.text_config.to_dict()
a__ : Union[str, Any] = self.vision_config.to_dict()
a__ : List[Any] = self.__class__.model_type
return output
| 112
| 0
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ , unittest.TestCase ):
__lowerCAmelCase : Any = XLMTokenizer
__lowerCAmelCase : List[Any] = False
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCAmelCase : Optional[int] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
UpperCAmelCase : str = dict(zip(_SCREAMING_SNAKE_CASE , range(len(_SCREAMING_SNAKE_CASE ) ) ) )
UpperCAmelCase : List[Any] = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
UpperCAmelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCAmelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" ) as fp:
fp.write(json.dumps(_SCREAMING_SNAKE_CASE ) )
with open(self.merges_file , """w""" ) as fp:
fp.write("""\n""".join(_SCREAMING_SNAKE_CASE ) )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> Any:
'''simple docstring'''
UpperCAmelCase : Optional[int] = """lower newer"""
UpperCAmelCase : Dict = """lower newer"""
return input_text, output_text
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = XLMTokenizer(self.vocab_file , self.merges_file )
UpperCAmelCase : Optional[int] = """lower"""
UpperCAmelCase : Optional[int] = ["""low""", """er</w>"""]
UpperCAmelCase : List[Any] = tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[int] = tokens + ["""<unk>"""]
UpperCAmelCase : List[Any] = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
@slow
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
UpperCAmelCase : List[Any] = XLMTokenizer.from_pretrained("""xlm-mlm-en-2048""" )
UpperCAmelCase : int = tokenizer.encode("""sequence builders""" , add_special_tokens=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[Any] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Tuple = tokenizer.build_inputs_with_special_tokens(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : str = tokenizer.build_inputs_with_special_tokens(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
assert encoded_sentence == [0] + text + [1]
assert encoded_pair == [0] + text + [1] + text_a + [1]
| 359
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
A: Union[str, Any] = {
"configuration_speecht5": [
"SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP",
"SpeechT5Config",
"SpeechT5HifiGanConfig",
],
"feature_extraction_speecht5": ["SpeechT5FeatureExtractor"],
"processing_speecht5": ["SpeechT5Processor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: List[str] = ["SpeechT5Tokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: Dict = [
"SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST",
"SpeechT5ForSpeechToText",
"SpeechT5ForSpeechToSpeech",
"SpeechT5ForTextToSpeech",
"SpeechT5Model",
"SpeechT5PreTrainedModel",
"SpeechT5HifiGan",
]
if TYPE_CHECKING:
from .configuration_speechta import (
SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
SpeechTaConfig,
SpeechTaHifiGanConfig,
)
from .feature_extraction_speechta import SpeechTaFeatureExtractor
from .processing_speechta import SpeechTaProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speechta import SpeechTaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speechta import (
SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaHifiGan,
SpeechTaModel,
SpeechTaPreTrainedModel,
)
else:
import sys
A: Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
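# With the module replaced by a _LazyModule, the heavy submodules (torch, sentencepiece)
# load only on first attribute access. A typical downstream use looks like the sketch
# below; the checkpoint name is the stock SpeechT5 TTS identifier on the Hub, shown for
# illustration, and both torch and sentencepiece must be installed.
from transformers import SpeechT5ForTextToSpeech, SpeechT5Processor

processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts")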
def binary_or(a: int, b: int) -> str:
    """Return the binary OR of two non-negative integers as a "0b"-prefixed string.

    >>> binary_or(25, 32)
    '0b111001'
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
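# Quick sanity check (inputs chosen arbitrarily): 25 is 0b11001 and 32 is 0b100000,
# so their OR should be 0b111001, i.e. 57 -- which also matches Python's built-in |.
print(binary_or(25, 32))                       # 0b111001
print(int(binary_or(25, 32), 2) == (25 | 32))  # True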
from __future__ import annotations


class IIRFilter:
    """N-order IIR filter, processing float samples (assumed normalized to [-1, 1])."""

    def __init__(self, order: int) -> None:
        self.order = order

        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order

        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        """Set the a/b coefficients; a_0 may be omitted, in which case it defaults to 1.0."""
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]

        if len(a_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )
        if len(b_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )

        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        """Calculate y[n] from x[n] using the direct form I difference equation."""
        result = 0.0

        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )

        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]

        # Shift the history buffers and record the newest input/output.
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result

        return result
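# Brief usage sketch. The coefficients below are illustrative placeholders -- a simple
# first-order exponential smoother, y[n] = k*x[n] + (1 - k)*y[n-1], written in a/b form --
# not a properly designed filter (real designs would come from e.g. a bilinear transform).
import math

k = 0.1  # illustrative smoothing factor
filt = IIRFilter(1)
filt.set_coefficients([1.0, -(1.0 - k)], [k, 0.0])

samples = [math.sin(2 * math.pi * 0.05 * n) for n in range(8)]
print([round(filt.process(s), 4) for s in samples])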
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V, such that every edge (u, v) connects a vertex from U to one from V or a vertex
# from V to one from U. In other words, for every edge (u, v), either u belongs to U and
# v to V, or u belongs to V and v to U -- no edge connects two vertices of the same set.
# Equivalently, a graph is bipartite exactly when it is 2-colorable; an odd-cycle
# counterexample follows the demo below.
def check_bipartite_dfs(graph: dict[int, list[int]]) -> bool:
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v: int, c: int) -> None:
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    # Color every connected component, starting each one with color 0.
    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    # Bipartite iff no edge joins two vertices of the same color.
    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True


# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
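# An odd cycle is the canonical non-bipartite case: a triangle (example chosen for
# illustration) cannot be 2-colored, so this should print False.
triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(triangle))  # False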
from __future__ import annotations

import unittest

import numpy as np


def schur_complement(
    mat_a: np.ndarray,
    mat_b: np.ndarray,
    mat_c: np.ndarray,
    pseudo_inv: np.ndarray | None = None,
) -> np.ndarray:
    """Schur complement S = C - B.T @ A^(-1) @ B of the block matrix [[A, B], [B.T, C]].

    A precomputed (pseudo-)inverse of A may be supplied via ``pseudo_inv``.
    """
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        raise ValueError(
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
    if shape_b[1] != shape_c[1]:
        raise ValueError(
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError(
                "Input matrix A is not invertible. Cannot compute Schur complement."
            )

    return mat_c - mat_b.T @ a_inv @ mat_b


class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)

        input_matrix = np.block([[a, b], [b.T, c]])

        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)

        # det([[A, B], [B.T, C]]) == det(A) * det(S)
        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        # Passing the blocks in the wrong order trips the shape validation.
        with self.assertRaises(ValueError):
            schur_complement(b, a, c)

    def test_improper_b_c_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])
        with self.assertRaises(ValueError):
            schur_complement(a, b, c)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    unittest.main()
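# The assertion above leans on the block-determinant identity
#   det([[A, B], [B.T, C]]) = det(A) * det(S).
# A quick interactive check with a tiny hand-picked example (matrices chosen
# arbitrarily; assumes schur_complement from above is in scope):
import numpy as np

a = np.array([[4.0, 1.0], [1.0, 3.0]])
b = np.array([[1.0], [2.0]])
c = np.array([[6.0]])
s = schur_complement(a, b, c)  # 1x1 matrix: 6 - 15/11 = 51/11
print(np.isclose(np.linalg.det(np.block([[a, b], [b.T, c]])),
                 np.linalg.det(a) * np.linalg.det(s)))  # True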
import warnings

from diffusers import StableDiffusionImg2ImgPipeline  # noqa: F401


warnings.warn(
    "The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
    " StableDiffusionImg2ImgPipeline` instead."
)
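# Minimal migration sketch: import the pipeline from diffusers directly. The checkpoint
# identifier below is illustrative -- any img2img-capable Stable Diffusion checkpoint on
# the Hub works, and from_pretrained will download it on first use.
from diffusers import StableDiffusionImg2ImgPipeline

pipe = StableDiffusionImg2ImgPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")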
class RadixNode:
    def __init__(self, prefix: str = "", is_leaf: bool = False) -> None:
        # Mapping from the first letter of an edge to the child node
        self.nodes: dict[str, RadixNode] = {}

        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix

    def match(self, word: str) -> tuple[str, str, str]:
        """Compute the common substring of the node's prefix and a word.

        Returns (common substring, remaining prefix, remaining word).
        """
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]

    def insert_many(self, words: list[str]) -> None:
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        # Case 1: The word equals the node's prefix
        # Solution: We mark the current node as a leaf
        if self.prefix == word:
            self.is_leaf = True

        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)

        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)

            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)

            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix

                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node

                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)

    def find(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)

    def delete(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes

                    return True

    def print_tree(self, height: int = 0) -> None:
        if self.prefix != "":
            print("-" * height, self.prefix, " (leaf)" if self.is_leaf else "")

        for value in self.nodes.values():
            value.print_tree(height + 1)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)

    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")

    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")

    return True


def pytests() -> None:
    assert test_trie()


def main() -> None:
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)

    print("Words:", words)
    print("Tree:")
    root.print_tree()


if __name__ == "__main__":
    main()
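# To watch the prefix-splitting behaviour (Case 4 of insert) in isolation, with words
# chosen arbitrarily: inserting "team" after "test" splits the "test" edge into a shared
# internal "te" node with two leaf children.
root = RadixNode()
root.insert_many(["test", "team"])
root.print_tree()  # prints "te" with children "st" and "am", both marked (leaf)
print(root.find("team"), root.find("tea"))  # True False -- "tea" was never inserted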
"""simple docstring"""
from math import isqrt, loga
def A__ ( UpperCamelCase__ ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , UpperCamelCase__ , UpperCamelCase__ ):
_SCREAMING_SNAKE_CASE = False
return [i for i in range(2 , UpperCamelCase__ ) if is_prime[i]]
def A__ ( UpperCamelCase__ = 800_800 , UpperCamelCase__ = 800_800 ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = degree * loga(UpperCamelCase__ )
_SCREAMING_SNAKE_CASE = int(UpperCamelCase__ )
_SCREAMING_SNAKE_CASE = calculate_prime_numbers(UpperCamelCase__ )
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = len(UpperCamelCase__ ) - 1
while left < right:
while (
prime_numbers[right] * loga(prime_numbers[left] )
+ prime_numbers[left] * loga(prime_numbers[right] )
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
if __name__ == "__main__":
print(F'''{solution() = }''')
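# Smoke test: the Project Euler problem statement quotes 10790 hybrid-integers not
# exceeding 800^800, so -- assuming that quoted value -- this should print 10790:
print(solution(base=800, degree=800))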
"""simple docstring"""
def A__ ( UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
if mass < 0:
raise ValueError('''The mass of a body cannot be negative''' )
return 0.5 * mass * abs(UpperCamelCase__ ) * abs(UpperCamelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
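# Worked example: a 5 kg mass moving at 4 m/s carries 0.5 * 5 * 4**2 = 40 J, and only
# speed matters, not direction:
print(kinetic_energy(5, 4))   # 40.0
print(kinetic_energy(5, -4))  # 40.0 -- reversing the velocity leaves the energy unchanged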