def fibonacci(n: int) -> int:
    """Return the n-th Fibonacci number, computed iteratively."""
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """Return the index of the first Fibonacci number with at least n digits."""
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    """Return the index of the first term in the Fibonacci sequence to contain n digits."""
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
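
# A quick sanity check (added here; not part of the dataset row): the first
# Fibonacci number with three digits is F(12) = 144.
assert fibonacci(12) == 144
assert solution(3) == 12  # index of the first three-digit Fibonacci number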
import logging

import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed


class ConstantLengthDataset(IterableDataset):
    """Yields fixed-length token sequences built by concatenating streamed text examples."""

    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            # Fill a character buffer large enough to yield a batch of sequences.
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            # Tokenize the buffer, join everything with the BOS token, and cut into
            # seq_length-sized chunks. (The truncation flag was lost in this row;
            # False matches the upstream codeparrot script.)
            tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)


def create_dataloader(args):
    """Builds a streaming eval dataloader; relies on the module-level `tokenizer`."""
    ds_kwargs = {"streaming": True}
    valid_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader


def evaluate(args):
    """Mean eval loss and perplexity; uses the module-level `model`, `eval_dataloader`, `accelerator`."""
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()


# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
from numpy import exp, pi, sqrt


def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> float:
    """Evaluate the Gaussian (normal) probability density function at x."""
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
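
# The doctest hook above has nothing left to run because this row dropped the
# docstring examples; a small added check of the expected values, using the
# known peak value 1 / sqrt(2 * pi):
from math import isclose

assert isclose(gaussian(0), 0.3989422804014327)  # 1 / sqrt(2 * pi)
assert isclose(gaussian(2, mu=2), gaussian(0))   # the peak value depends only on sigma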
def solution() -> str:
    """Return the last ten digits of the series 1^1 + 2^2 + ... + 1000^1000."""
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]


if __name__ == "__main__":
    print(solution())
def generate_large_matrix() -> list[list[int]]:
    """Generate a 1000x1000 grid sorted in decreasing order along rows and columns."""
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (  # name not preserved in this row; `test_grids` assumed
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    """Validate that the rows and columns of the grid are sorted in decreasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    """Find the index of the first negative number via binary search."""
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1

    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    """Count negatives using binary search on each row; the bound shrinks row by row."""
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound

    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    """Count negatives with a flat brute-force scan of every cell."""
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    """Brute force, but break out of each row at the first negative number."""
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    """Benchmark the three counting implementations on the large grid."""
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
import inspect
import os
import unittest

import torch

import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
    execute_subprocess_async,
    require_cpu,
    require_huggingface_suite,
    require_multi_gpu,
    require_single_gpu,
)
from accelerate.utils import patch_environment


@require_huggingface_suite
class MetricTester(unittest.TestCase):
    # Class and test method names reconstructed; this row only kept placeholder names.
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class ScoreSdeVePipeline(DiffusionPipeline):
    """Pipeline for unconditional image generation with a score-based (VE) SDE scheduler."""

    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
import itertools
import math


def is_prime(number: int) -> bool:
    """Check primality in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    """Yield the prime numbers in increasing order."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10001) -> int:
    """Return the nth prime number."""
    return next(itertools.islice(prime_generator(), nth - 1, nth))


if __name__ == "__main__":
    print(f"{solution() = }")
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tensorflow_text_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_bert": ["BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BertConfig", "BertOnnxConfig"],
    "tokenization_bert": ["BasicTokenizer", "BertTokenizer", "WordpieceTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bert_fast"] = ["BertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bert"] = [
        "BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BertForMaskedLM",
        "BertForMultipleChoice",
        "BertForNextSentencePrediction",
        "BertForPreTraining",
        "BertForQuestionAnswering",
        "BertForSequenceClassification",
        "BertForTokenClassification",
        "BertLayer",
        "BertLMHeadModel",
        "BertModel",
        "BertPreTrainedModel",
        "load_tf_weights_in_bert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_bert"] = [
        "TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFBertEmbeddings",
        "TFBertForMaskedLM",
        "TFBertForMultipleChoice",
        "TFBertForNextSentencePrediction",
        "TFBertForPreTraining",
        "TFBertForQuestionAnswering",
        "TFBertForSequenceClassification",
        "TFBertForTokenClassification",
        "TFBertLMHeadModel",
        "TFBertMainLayer",
        "TFBertModel",
        "TFBertPreTrainedModel",
    ]

try:
    if not is_tensorflow_text_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bert_tf"] = ["TFBertTokenizer"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_bert"] = [
        "FlaxBertForCausalLM",
        "FlaxBertForMaskedLM",
        "FlaxBertForMultipleChoice",
        "FlaxBertForNextSentencePrediction",
        "FlaxBertForPreTraining",
        "FlaxBertForQuestionAnswering",
        "FlaxBertForSequenceClassification",
        "FlaxBertForTokenClassification",
        "FlaxBertModel",
        "FlaxBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
    from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bert_fast import BertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bert import (
            BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BertForMaskedLM,
            BertForMultipleChoice,
            BertForNextSentencePrediction,
            BertForPreTraining,
            BertForQuestionAnswering,
            BertForSequenceClassification,
            BertForTokenClassification,
            BertLayer,
            BertLMHeadModel,
            BertModel,
            BertPreTrainedModel,
            load_tf_weights_in_bert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_bert import (
            TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFBertEmbeddings,
            TFBertForMaskedLM,
            TFBertForMultipleChoice,
            TFBertForNextSentencePrediction,
            TFBertForPreTraining,
            TFBertForQuestionAnswering,
            TFBertForSequenceClassification,
            TFBertForTokenClassification,
            TFBertLMHeadModel,
            TFBertMainLayer,
            TFBertModel,
            TFBertPreTrainedModel,
        )

    try:
        if not is_tensorflow_text_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bert_tf import TFBertTokenizer

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_bert import (
            FlaxBertForCausalLM,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForNextSentencePrediction,
            FlaxBertForPreTraining,
            FlaxBertForQuestionAnswering,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
            FlaxBertModel,
            FlaxBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse
import json
import os
import re
import shutil

import torch

from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging


logging.set_verbosity_warning()

json_indent = 2  # indent used for the generated JSON files


class Dictionary:
    """A mapping from symbols to consecutive integers (fairseq-style dictionary)."""

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)

    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices

    @classmethod
    def load(cls, f):
        """Load the dictionary from a text file with the format `<symbol0> <count0>` per line."""
        d = cls()
        d.add_from_file(f)
        return d

    def add_symbol(self, word, n=1, overwrite=False):
        """Add a word to the dictionary and return its index."""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def _load_meta(self, lines):
        return 0

    def add_from_file(self, f):
        """Load a pre-existing dictionary from a text file and add its symbols to this instance."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines)

        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")


def rewrite_dict_keys(d):
    """Strip the BPE continuation marker `@@` and add `</w>` to unbroken words."""
    d2 = dict(
        (re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items()
    )
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2


def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f"path {biogpt_checkpoint_path} does not exist!")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f"path to the file {checkpoint_file} does not exist!")
    chkpt = torch.load(checkpoint_file, map_location="cpu")

    args = chkpt["cfg"]["model"]

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
    if not os.path.isfile(dict_file):
        raise ValueError(f"path to the file {dict_file} does not exist!")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
    print(f"Generating {src_vocab_file} of {src_vocab_size} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f"path to the file {bpecodes_file} does not exist!")

    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")
    model_conf = {
        "activation_dropout": args["activation_dropout"],
        "architectures": ["BioGptForCausalLM"],
        "attention_probs_dropout_prob": args["attention_dropout"],
        "bos_token_id": 0,
        "eos_token_id": 2,
        "hidden_act": args["activation_fn"],
        "hidden_dropout_prob": args["dropout"],
        "hidden_size": args["decoder_embed_dim"],
        "initializer_range": 0.02,
        "intermediate_size": args["decoder_ffn_embed_dim"],
        "layer_norm_eps": 1e-12,
        "layerdrop": args["decoder_layerdrop"],
        "max_position_embeddings": args["max_target_positions"],
        "model_type": "biogpt",
        "num_attention_heads": args["decoder_attention_heads"],
        "num_hidden_layers": args["decoder_layers"],
        "pad_token_id": 1,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_decoder_input_output_embed"],
        "vocab_size": src_vocab_size,
    }

    # good hparam defaults to start with
    print(f"Generating {biogpt_model_config_file}")
    with open(biogpt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)
    tokenizer_conf = {
        "bos_token": "<s>",
        "eos_token": "</s>",
        "model_max_length": 1024,
        "pad_token": "<pad>",
        "special_tokens_map_file": None,
        "tokenizer_class": "BioGptTokenizer",
        "unk_token": "<unk>",
    }
    print(f"Generating {biogpt_tokenizer_config_file}")
    with open(biogpt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model_state_dict = chkpt["model"]

    # remove unneeded keys
    ignore_keys = [
        "decoder.version",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    # remap the state dict keys (targets reconstructed from the upstream conversion script)
    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith("output_projection.weight"):
            model_state_dict["output_projection.weight"] = model_state_dict.pop(layer_name)
        else:
            model_state_dict[layer_name.replace("decoder", "biogpt")] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--biogpt_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help=(
            "Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
            " bpecodes, etc."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile

from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError

import transformers
from transformers import (
    CONFIG_MAPPING,
    FEATURE_EXTRACTOR_MAPPING,
    PROCESSOR_MAPPING,
    TOKENIZER_MAPPING,
    AutoConfig,
    AutoFeatureExtractor,
    AutoProcessor,
    AutoTokenizer,
    BertTokenizer,
    ProcessorMixin,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available


sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))

from test_module.custom_configuration import CustomConfig  # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor  # noqa E402
from test_module.custom_processing import CustomProcessor  # noqa E402
from test_module.custom_tokenization import CustomTokenizer  # noqa E402


SAMPLE_PROCESSOR_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_VOCAB = get_tests_dir("fixtures/vocab.json")
SAMPLE_PROCESSOR_CONFIG_DIR = get_tests_dir("fixtures")


class AutoProcessorTest(unittest.TestCase):
    # Class and test method names reconstructed; this row only kept placeholder names.
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_processor_from_model_shortcut(self):
        processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_local_directory_from_repo(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()
            processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            processor.save_pretrained(tmpdirname)

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_local_directory_from_extractor_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # copy relevant files
            copyfile(SAMPLE_PROCESSOR_CONFIG, os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME))
            copyfile(SAMPLE_VOCAB, os.path.join(tmpdirname, "vocab.json"))

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_feat_extr_processor_class(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = Wav2Vec2FeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")

            processor = Wav2Vec2Processor(feature_extractor, tokenizer)

            # save in new folder
            processor.save_pretrained(tmpdirname)

            # drop `processor_class` in tokenizer
            with open(os.path.join(tmpdirname, TOKENIZER_CONFIG_FILE), "r") as f:
                config_dict = json.load(f)
            config_dict.pop("processor_class")

            with open(os.path.join(tmpdirname, TOKENIZER_CONFIG_FILE), "w") as f:
                f.write(json.dumps(config_dict))

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_tokenizer_processor_class(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = Wav2Vec2FeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")

            processor = Wav2Vec2Processor(feature_extractor, tokenizer)

            # save in new folder
            processor.save_pretrained(tmpdirname)

            # drop `processor_class` in feature extractor
            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "r") as f:
                config_dict = json.load(f)
            config_dict.pop("processor_class")

            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "w") as f:
                f.write(json.dumps(config_dict))

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_local_directory_from_model_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config(processor_class="Wav2Vec2Processor")
            model_config.save_pretrained(tmpdirname)
            # copy relevant files
            copyfile(SAMPLE_VOCAB, os.path.join(tmpdirname, "vocab.json"))
            # create empty sample processor
            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "w") as f:
                f.write("{}")

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_from_pretrained_dynamic_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=False
            )

        processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor", trust_remote_code=True)
        self.assertTrue(processor.special_attribute_present)
        self.assertEqual(processor.__class__.__name__, "NewProcessor")

        feature_extractor = processor.feature_extractor
        self.assertTrue(feature_extractor.special_attribute_present)
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        tokenizer = processor.tokenizer
        self.assertTrue(tokenizer.special_attribute_present)
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            new_processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=True, use_fast=False
            )
            new_tokenizer = new_processor.tokenizer
            self.assertTrue(new_tokenizer.special_attribute_present)
            self.assertEqual(new_tokenizer.__class__.__name__, "NewTokenizer")
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")

    def test_new_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            AutoProcessor.register(CustomConfig, CustomProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoProcessor.register(Wav2Vec2Config, Wav2Vec2Processor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

            with tempfile.TemporaryDirectory() as tmp_dir:
                vocab_file = os.path.join(tmp_dir, "vocab.txt")
                with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                    vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
                tokenizer = CustomTokenizer(vocab_file)

            processor = CustomProcessor(feature_extractor, tokenizer)

            with tempfile.TemporaryDirectory() as tmp_dir:
                processor.save_pretrained(tmp_dir)
                new_processor = AutoProcessor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_processor, CustomProcessor)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
            if CustomConfig in PROCESSOR_MAPPING._extra_content:
                del PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_processor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            special_attribute_present = False

        class NewTokenizer(BertTokenizer):
            special_attribute_present = False

        class NewProcessor(ProcessorMixin):
            feature_extractor_class = "AutoFeatureExtractor"
            tokenizer_class = "AutoTokenizer"
            special_attribute_present = False

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=NewTokenizer)
            AutoProcessor.register(CustomConfig, NewProcessor)
            # If remote code is not set, the default is to use local classes.
            processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor")
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertFalse(processor.special_attribute_present)
            self.assertFalse(processor.feature_extractor.special_attribute_present)
            self.assertFalse(processor.tokenizer.special_attribute_present)

            # If remote code is disabled, we load the local ones.
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=False
            )
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertFalse(processor.special_attribute_present)
            self.assertFalse(processor.feature_extractor.special_attribute_present)
            self.assertFalse(processor.tokenizer.special_attribute_present)

            # If remote is enabled, we load from the Hub.
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=True
            )
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertTrue(processor.special_attribute_present)
            self.assertTrue(processor.feature_extractor.special_attribute_present)
            self.assertTrue(processor.tokenizer.special_attribute_present)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
            if CustomConfig in PROCESSOR_MAPPING._extra_content:
                del PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_auto_processor_creates_tokenizer(self):
        processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-bert")
        self.assertEqual(processor.__class__.__name__, "BertTokenizerFast")

    def test_auto_processor_creates_image_processor(self):
        processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-convnext")
        self.assertEqual(processor.__class__.__name__, "ConvNextImageProcessor")


@is_staging_test
class ProcessorPushToHubTester(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-processor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        processor = Wav2Vec2Processor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir, "test-processor"), push_to_hub=True, use_auth_token=self._token
            )

            new_processor = Wav2Vec2Processor.from_pretrained(f"{USER}/test-processor")
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(v, getattr(new_processor.feature_extractor, k))
            self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab())

    def test_push_to_hub_in_organization(self):
        processor = Wav2Vec2Processor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir, "test-processor-org"),
                push_to_hub=True,
                use_auth_token=self._token,
                organization="valid_org",
            )

            new_processor = Wav2Vec2Processor.from_pretrained("valid_org/test-processor-org")
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(v, getattr(new_processor.feature_extractor, k))
            self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab())

    def test_push_to_hub_dynamic_processor(self):
        CustomFeatureExtractor.register_for_auto_class()
        CustomTokenizer.register_for_auto_class()
        CustomProcessor.register_for_auto_class()

        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)

        processor = CustomProcessor(feature_extractor, tokenizer)

        with tempfile.TemporaryDirectory() as tmp_dir:
            create_repo(f"{USER}/test-dynamic-processor", token=self._token)
            repo = Repository(tmp_dir, clone_from=f"{USER}/test-dynamic-processor", token=self._token)
            processor.save_pretrained(tmp_dir)

            # This has added the proper auto_map field to the feature extractor config
            self.assertDictEqual(
                processor.feature_extractor.auto_map,
                {
                    "AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor",
                    "AutoProcessor": "custom_processing.CustomProcessor",
                },
            )

            # This has added the proper auto_map field to the tokenizer config
            with open(os.path.join(tmp_dir, "tokenizer_config.json")) as f:
                tokenizer_config = json.load(f)
            self.assertDictEqual(
                tokenizer_config["auto_map"],
                {
                    "AutoTokenizer": ["custom_tokenization.CustomTokenizer", None],
                    "AutoProcessor": "custom_processing.CustomProcessor",
                },
            )

            # The code has been copied from fixtures
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_feature_extraction.py")))
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_tokenization.py")))
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_processing.py")))

            repo.push_to_hub()

        new_processor = AutoProcessor.from_pretrained(f"{USER}/test-dynamic-processor", trust_remote_code=True)
        # Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
        self.assertEqual(new_processor.__class__.__name__, "CustomProcessor")
def or_gate(input_1: int, input_2: int) -> int:
    """Calculate OR of the two input values (each 0 or 1)."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    """Tests the or_gate function."""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1


if __name__ == "__main__":
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseTransformersCLICommand(ABC):
    """Abstract base class for CLI subcommands: each command registers its own
    argument subparser and implements `run`."""

    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
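
# A usage sketch (added here; the names below are hypothetical, only the
# abstract interface above comes from the row): a concrete command registers
# a subparser, wires a factory via set_defaults, and implements run().
class GreetCommand(BaseTransformersCLICommand):
    """Hypothetical command: `cli greet --name you` prints a greeting."""

    @staticmethod
    def register_subcommand(parser):
        # `parser` is the object returned by ArgumentParser.add_subparsers()
        greet_parser = parser.add_parser("greet")
        greet_parser.add_argument("--name", type=str, default="world")
        greet_parser.set_defaults(func=lambda args: GreetCommand(args.name))

    def __init__(self, name: str):
        self.name = name

    def run(self):
        print(f"Hello, {self.name}!")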
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import PoolFormerImageProcessor


class PoolFormerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }


@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    # Test method names reconstructed; this row only kept placeholder names.
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize_and_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "crop_pct"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__=13 , UpperCamelCase__=[30, 30] , UpperCamelCase__=2 , UpperCamelCase__=3 , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=32 , UpperCamelCase__=5 , UpperCamelCase__=4 , UpperCamelCase__=37 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=10 , UpperCamelCase__=0.0_2 , UpperCamelCase__=3 , UpperCamelCase__=None , UpperCamelCase__=8 , UpperCamelCase__=10 , ):
A__ : Optional[int] = parent
A__ : List[Any] = batch_size
A__ : Dict = image_size
A__ : Any = patch_size
A__ : Dict = num_channels
A__ : List[Any] = is_training
A__ : int = use_labels
A__ : Any = hidden_size
A__ : List[str] = num_hidden_layers
A__ : Optional[int] = num_attention_heads
A__ : Optional[Any] = intermediate_size
A__ : str = hidden_act
A__ : str = hidden_dropout_prob
A__ : Optional[int] = attention_probs_dropout_prob
A__ : Optional[int] = type_sequence_label_size
A__ : Any = initializer_range
A__ : Optional[int] = num_labels
A__ : Union[str, Any] = scope
A__ : Union[str, Any] = n_targets
A__ : Dict = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
A__ : int = (image_size[1] // patch_size) * (image_size[0] // patch_size)
A__ : List[str] = num_patches + 1 + self.num_detection_tokens
def __snake_case ( self ):
A__ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
A__ : int = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
A__ : Tuple = []
for i in range(self.batch_size ):
A__ : List[Any] = {}
A__ : Tuple = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=UpperCamelCase__ )
A__ : Any = torch.rand(self.n_targets , 4 , device=UpperCamelCase__ )
labels.append(UpperCamelCase__ )
A__ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __snake_case ( self ):
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : Tuple = YolosModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A__ : Optional[Any] = model(UpperCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : Any = YolosForObjectDetection(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A__ : Union[str, Any] = model(pixel_values=UpperCamelCase__ )
A__ : Optional[int] = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
A__ : Union[str, Any] = model(pixel_values=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def __snake_case ( self ):
A__ : Optional[int] = self.prepare_config_and_inputs()
A__ , A__ , A__ : Optional[Any] = config_and_inputs
A__ : Optional[int] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
_lowerCAmelCase = (
{"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
)
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=False ):
A__ : Optional[int] = super()._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
A__ : str = []
for i in range(self.model_tester.batch_size ):
A__ : int = {}
A__ : Dict = torch.ones(
size=(self.model_tester.n_targets,) , device=UpperCamelCase__ , dtype=torch.long )
A__ : Dict = torch.ones(
self.model_tester.n_targets , 4 , device=UpperCamelCase__ , dtype=torch.float )
labels.append(UpperCamelCase__ )
A__ : Dict = labels
return inputs_dict
def __snake_case ( self ):
A__ : List[Any] = YolosModelTester(self )
A__ : List[str] = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 )
def __snake_case ( self ):
self.config_tester.run_common_tests()
def __snake_case ( self ):
# YOLOS does not use inputs_embeds
pass
def __snake_case ( self ):
A__ , A__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : Any = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A__ : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) )
def __snake_case ( self ):
A__ , A__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : List[str] = model_class(UpperCamelCase__ )
A__ : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ : Optional[int] = [*signature.parameters.keys()]
A__ : Optional[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def __snake_case ( self ):
A__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def __snake_case ( self ):
A__ , A__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
A__ : Tuple = True
# in YOLOS, the seq_len is different
A__ : List[Any] = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
A__ : Any = True
A__ : Optional[int] = False
A__ : Optional[Any] = True
A__ : int = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A__ : List[str] = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
A__ : Optional[int] = outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
A__ : Tuple = True
A__ : Optional[Any] = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A__ : Tuple = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
A__ : Tuple = outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
A__ : List[Any] = len(UpperCamelCase__ )
# Check attention is always last and order is fine
A__ : List[str] = True
A__ : List[Any] = True
A__ : int = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A__ : Tuple = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
A__ : Tuple = 1
self.assertEqual(out_len + added_hidden_states , len(UpperCamelCase__ ) )
A__ : List[str] = outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def __snake_case ( self ):
def check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : str = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A__ : int = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
A__ : Optional[Any] = outputs.hidden_states
A__ : int = getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
# YOLOS has a different seq_length
A__ : Union[str, Any] = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
A__ , A__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : int = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
            # check that output_hidden_states also works when set via the config
del inputs_dict["output_hidden_states"]
A__ : Optional[int] = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
A__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*UpperCamelCase__ )
@slow
def __snake_case ( self ):
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : Union[str, Any] = YolosModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE ( ) -> List[str]:
"""simple docstring"""
A__ : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __snake_case ( self ):
return AutoImageProcessor.from_pretrained('''hustvl/yolos-small''' ) if is_vision_available() else None
@slow
def __snake_case ( self ):
A__ : Tuple = YolosForObjectDetection.from_pretrained('''hustvl/yolos-small''' ).to(UpperCamelCase__ )
A__ : str = self.default_image_processor
A__ : Tuple = prepare_img()
A__ : Tuple = image_processor(images=UpperCamelCase__ , return_tensors='''pt''' ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
A__ : Any = model(inputs.pixel_values )
# verify outputs
A__ : List[Any] = torch.Size((1, 100, 92) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
A__ : Optional[int] = torch.tensor(
[[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] , device=UpperCamelCase__ , )
A__ : Optional[int] = torch.tensor(
[[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] , device=UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , UpperCamelCase__ , atol=1e-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , UpperCamelCase__ , atol=1e-4 ) )
# verify postprocessing
A__ : Dict = image_processor.post_process_object_detection(
UpperCamelCase__ , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
A__ : int = torch.tensor([0.9_9_9_4, 0.9_7_9_0, 0.9_9_6_4, 0.9_9_7_2, 0.9_8_6_1] ).to(UpperCamelCase__ )
A__ : str = [75, 75, 17, 63, 17]
A__ : Tuple = torch.tensor([3_3_5.0_6_0_9, 7_9.3_8_4_8, 3_7_5.4_2_1_6, 1_8_7.2_4_9_5] ).to(UpperCamelCase__ )
self.assertEqual(len(results['''scores'''] ) , 5 )
self.assertTrue(torch.allclose(results['''scores'''] , UpperCamelCase__ , atol=1e-4 ) )
self.assertSequenceEqual(results['''labels'''].tolist() , UpperCamelCase__ )
self.assertTrue(torch.allclose(results['''boxes'''][0, :] , UpperCamelCase__ ) )
| 55
| 1
|
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE : int = logging.get_logger()
@dataclass
class UpperCamelCase__ :
'''simple docstring'''
_lowerCAmelCase = 42
_lowerCAmelCase = field(default_factory=SCREAMING_SNAKE_CASE_ )
_lowerCAmelCase = field(default_factory=SCREAMING_SNAKE_CASE_ )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
        A__ : List[str] = len(list(m.modules() ) ) == 1 or isinstance(m , nn.Conv2d ) or isinstance(m , nn.BatchNorm2d )
if has_not_submodules:
self.traced.append(UpperCamelCase__ )
def __call__( self , UpperCamelCase__ ):
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(UpperCamelCase__ )
[x.remove() for x in self.handles]
return self
@property
def __snake_case ( self ):
# check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
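# Roughly, the Tracker above works like this: calling it on an input registers a forward hook
# on every submodule, runs a single forward pass, and records each leaf module (one with no
# submodules, or a Conv2d / BatchNorm2d) as it fires. The `parametrized` property then keeps
# only the traced modules that carry learnable parameters, yielding an execution-ordered list
# of layers whose weights ModuleTransfer below can copy one-to-one.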
@dataclass
class UpperCamelCase__ :
'''simple docstring'''
_lowerCAmelCase = 42
_lowerCAmelCase = 42
_lowerCAmelCase = 0
_lowerCAmelCase = field(default_factory=SCREAMING_SNAKE_CASE_ )
_lowerCAmelCase = field(default_factory=SCREAMING_SNAKE_CASE_ )
def __call__( self , UpperCamelCase__ ):
A__ : Any = Tracker(self.dest )(UpperCamelCase__ ).parametrized
A__ : Optional[Any] = Tracker(self.src )(UpperCamelCase__ ).parametrized
A__ : Tuple = list(filter(lambda UpperCamelCase__ : type(UpperCamelCase__ ) not in self.src_skip , UpperCamelCase__ ) )
A__ : Union[str, Any] = list(filter(lambda UpperCamelCase__ : type(UpperCamelCase__ ) not in self.dest_skip , UpperCamelCase__ ) )
if len(UpperCamelCase__ ) != len(UpperCamelCase__ ):
raise Exception(
F"Numbers of operations are different. Source module has {len(UpperCamelCase__ )} operations while"
F" destination module has {len(UpperCamelCase__ )}." )
for dest_m, src_m in zip(UpperCamelCase__ , UpperCamelCase__ ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(F"Transfered from={src_m} to={dest_m}" )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : str , __UpperCamelCase : ResNetConfig , __UpperCamelCase : Path , __UpperCamelCase : bool = True ) -> Union[str, Any]:
"""simple docstring"""
print(F"Converting {name}..." )
with torch.no_grad():
A__ : List[str] = timm.create_model(__UpperCamelCase , pretrained=__UpperCamelCase ).eval()
A__ : List[str] = ResNetForImageClassification(__UpperCamelCase ).eval()
A__ : List[Any] = ModuleTransfer(src=__UpperCamelCase , dest=__UpperCamelCase )
A__ : Any = torch.randn((1, 3, 2_24, 2_24) )
module_transfer(__UpperCamelCase )
assert torch.allclose(from_model(__UpperCamelCase ) , our_model(__UpperCamelCase ).logits ), "The model logits don't match the original one."
A__ : Union[str, Any] = F"resnet{'-'.join(name.split('resnet' ) )}"
print(__UpperCamelCase )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add model''' , use_temp_dir=__UpperCamelCase , )
# we can use the convnext one
A__ : int = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add image processor''' , use_temp_dir=__UpperCamelCase , )
print(F"Pushed {checkpoint_name}" )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Path , __UpperCamelCase : str = None , __UpperCamelCase : bool = True ) -> Dict:
"""simple docstring"""
A__ : Optional[int] = '''imagenet-1k-id2label.json'''
A__ : Optional[int] = 10_00
A__ : Optional[int] = (1, num_labels)
A__ : Dict = '''huggingface/label-files'''
A__ : List[Any] = num_labels
A__ : str = json.load(open(hf_hub_download(__UpperCamelCase , __UpperCamelCase , repo_type='''dataset''' ) , '''r''' ) )
A__ : Any = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
A__ : str = idalabel
A__ : List[str] = {v: k for k, v in idalabel.items()}
A__ : Dict = partial(__UpperCamelCase , num_labels=__UpperCamelCase , idalabel=__UpperCamelCase , labelaid=__UpperCamelCase )
A__ : str = {
'''resnet18''': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 1_28, 2_56, 5_12] , layer_type='''basic''' ),
'''resnet26''': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type='''bottleneck''' ),
'''resnet34''': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 1_28, 2_56, 5_12] , layer_type='''basic''' ),
'''resnet50''': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type='''bottleneck''' ),
'''resnet101''': ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type='''bottleneck''' ),
'''resnet152''': ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type='''bottleneck''' ),
}
if model_name:
convert_weight_and_push(__UpperCamelCase , names_to_config[model_name] , __UpperCamelCase , __UpperCamelCase )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return config, expected_shape
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help=(
'The name of the model you wish to convert, it must be one of the supported resnet* architecture,'
            ' currently: resnet18,26,34,50,101,152. If `None`, all of them will be converted.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=Path,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
default=True,
type=bool,
required=False,
help='If True, push model and image processor to the hub.',
)
_SCREAMING_SNAKE_CASE : Optional[Any] = parser.parse_args()
_SCREAMING_SNAKE_CASE : Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
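# A hypothetical invocation (script name and paths are placeholders, not from the original
# source):
#   python convert_resnet_to_pytorch.py --model_name resnet50 --pytorch_dump_folder_path ./resnet-dump
# With --model_name omitted, every entry in `names_to_config` is converted (and pushed, since
# --push_to_hub defaults to True).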
| 55
|
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> int:
"""simple docstring"""
if n == 1 or not isinstance(__UpperCamelCase , __UpperCamelCase ):
return 0
elif n == 2:
return 1
else:
A__ : Any = [0, 1]
for i in range(2 , n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> int:
"""simple docstring"""
A__ : Dict = 0
A__ : Optional[int] = 2
while digits < n:
index += 1
A__ : Dict = len(str(fibonacci(__UpperCamelCase ) ) )
return index
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int = 10_00 ) -> int:
"""simple docstring"""
return fibonacci_digits_index(__UpperCamelCase )
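# A quick sanity check, assuming the intended (de-obfuscated) definitions above:
# fibonacci_digits_index(3) == 12, since F(12) = 144 is the first Fibonacci number with three
# digits; solution(1000) is Project Euler 25 and returns 4782.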
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 55
| 1
|
import os
import sys
import unittest
_SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
_SCREAMING_SNAKE_CASE : Dict = os.path.join(git_repo_path, 'src', 'transformers')
_SCREAMING_SNAKE_CASE : Dict = '\n{0} = None\n'
_SCREAMING_SNAKE_CASE : Dict = '\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n'
_SCREAMING_SNAKE_CASE : int = '\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n'
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self ):
A__ : List[str] = find_backend(''' _import_structure["models.albert"].append("AlbertTokenizerFast")''' )
self.assertIsNone(UpperCamelCase__ )
A__ : Tuple = find_backend(''' if not is_tokenizers_available():''' )
self.assertEqual(UpperCamelCase__ , '''tokenizers''' )
A__ : Tuple = find_backend(''' if not is_tensorflow_text_available():''' )
self.assertEqual(UpperCamelCase__ , '''tensorflow_text''' )
A__ : Any = find_backend(''' if not (is_sentencepiece_available() and is_tokenizers_available()):''' )
self.assertEqual(UpperCamelCase__ , '''sentencepiece_and_tokenizers''' )
A__ : int = find_backend(
''' if not (is_sentencepiece_available() and is_tensorflow_text_available()):''' )
self.assertEqual(UpperCamelCase__ , '''sentencepiece_and_tensorflow_text''' )
A__ : List[Any] = find_backend(
''' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):''' )
self.assertEqual(UpperCamelCase__ , '''sentencepiece_and_tokenizers_and_vision''' )
def __snake_case ( self ):
A__ : Dict = read_init()
        # We don't assert on the exact list of keys to allow for a smooth growth of backend-specific objects
self.assertIn('''torch''' , UpperCamelCase__ )
self.assertIn('''tensorflow_text''' , UpperCamelCase__ )
self.assertIn('''sentencepiece_and_tokenizers''' , UpperCamelCase__ )
# Likewise, we can't assert on the exact content of a key
self.assertIn('''BertModel''' , objects['''torch'''] )
self.assertIn('''TFBertModel''' , objects['''tf'''] )
self.assertIn('''FlaxBertModel''' , objects['''flax'''] )
self.assertIn('''BertModel''' , objects['''torch'''] )
self.assertIn('''TFBertTokenizer''' , objects['''tensorflow_text'''] )
self.assertIn('''convert_slow_tokenizer''' , objects['''sentencepiece_and_tokenizers'''] )
def __snake_case ( self ):
A__ : str = create_dummy_object('''CONSTANT''' , '''\'torch\'''' )
self.assertEqual(UpperCamelCase__ , '''\nCONSTANT = None\n''' )
A__ : int = create_dummy_object('''function''' , '''\'torch\'''' )
self.assertEqual(
UpperCamelCase__ , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''' )
A__ : Optional[Any] = '''
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
'''
A__ : Union[str, Any] = create_dummy_object('''FakeClass''' , '''\'torch\'''' )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
A__ : Tuple = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
'''
A__ : Any = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']} )
self.assertEqual(dummy_files['''torch'''] , UpperCamelCase__ )
| 55
|
_SCREAMING_SNAKE_CASE : List[str] = range(2, 2_0 + 1)
_SCREAMING_SNAKE_CASE : Optional[Any] = [1_0**k for k in range(ks[-1] + 1)]
_SCREAMING_SNAKE_CASE : dict[int, dict[int, list[list[int]]]] = {}
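# This appears to solve Project Euler 551 ("Sum of digits sequence"): a(0) = 1 and
# a(n) = a(n - 1) + digitsum(a(n - 1)); the task is to find a(10**15). Digits are stored
# little-endian in `a_i`, conceptually split as a_i -> b * 10**k + c (see `compute` below).
# `memo[digitsum(b)][c]` caches "jumps" (diff, dn, k): adding `diff` to the low k digits
# advances the sequence by `dn` terms without the high digits changing, which lets
# `next_term` skip long runs of terms instead of iterating one by one.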
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any , __UpperCamelCase : Dict , __UpperCamelCase : Any , __UpperCamelCase : Optional[Any] ) -> int:
"""simple docstring"""
A__ : Tuple = sum(a_i[j] for j in range(__UpperCamelCase , len(__UpperCamelCase ) ) )
A__ : Tuple = sum(a_i[j] * base[j] for j in range(min(len(__UpperCamelCase ) , __UpperCamelCase ) ) )
A__ , A__ : Optional[int] = 0, 0
A__ : List[Any] = n - i
A__ : Any = memo.get(__UpperCamelCase )
if sub_memo is not None:
A__ : Optional[int] = sub_memo.get(__UpperCamelCase )
if jumps is not None and len(__UpperCamelCase ) > 0:
# find and make the largest jump without going over
A__ : List[Any] = -1
for _k in range(len(__UpperCamelCase ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
A__ : List[str] = _k
break
if max_jump >= 0:
A__ , A__ , A__ : List[Any] = jumps[max_jump]
# since the difference between jumps is cached, add c
A__ : int = diff + c
for j in range(min(__UpperCamelCase , len(__UpperCamelCase ) ) ):
A__ , A__ : List[str] = divmod(__UpperCamelCase , 10 )
if new_c > 0:
add(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
else:
A__ : List[Any] = []
else:
A__ : Optional[Any] = {c: []}
A__ : int = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
A__ , A__ : str = next_term(__UpperCamelCase , k - 1 , i + dn , __UpperCamelCase )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
A__ , A__ : str = compute(__UpperCamelCase , __UpperCamelCase , i + dn , __UpperCamelCase )
diff += _diff
dn += terms_jumped
A__ : str = sub_memo[c]
# keep jumps sorted by # of terms skipped
A__ : List[Any] = 0
while j < len(__UpperCamelCase ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(__UpperCamelCase , (diff, dn, k) )
return (diff, dn)
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[str] , __UpperCamelCase : int ) -> Any:
"""simple docstring"""
if i >= n:
return 0, i
if k > len(__UpperCamelCase ):
a_i.extend([0 for _ in range(k - len(__UpperCamelCase ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
A__ : Optional[Any] = i
A__ , A__ , A__ : Dict = 0, 0, 0
for j in range(len(__UpperCamelCase ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
A__ : int = ds_c + ds_b
diff += addend
A__ : List[Any] = 0
for j in range(__UpperCamelCase ):
A__ : Optional[Any] = a_i[j] + addend
A__ , A__ : List[str] = divmod(__UpperCamelCase , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return diff, i - start_i
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Dict , __UpperCamelCase : List[Any] , __UpperCamelCase : int ) -> Tuple:
"""simple docstring"""
for j in range(__UpperCamelCase , len(__UpperCamelCase ) ):
A__ : Any = digits[j] + addend
if s >= 10:
A__ , A__ : Union[str, Any] = divmod(__UpperCamelCase , 10 )
A__ : Optional[int] = addend // 10 + quotient
else:
A__ : Any = s
A__ : Dict = addend // 10
if addend == 0:
break
while addend > 0:
A__ , A__ : Dict = divmod(__UpperCamelCase , 10 )
digits.append(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int = 10**15 ) -> int:
"""simple docstring"""
A__ : List[Any] = [1]
A__ : Dict = 1
A__ : Tuple = 0
while True:
A__ , A__ : List[str] = next_term(__UpperCamelCase , 20 , i + dn , __UpperCamelCase )
dn += terms_jumped
if dn == n - i:
break
A__ : List[str] = 0
for j in range(len(__UpperCamelCase ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(f"""{solution() = }""")
| 55
| 1
|
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__=13 , UpperCamelCase__=[30, 30] , UpperCamelCase__=2 , UpperCamelCase__=3 , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=32 , UpperCamelCase__=5 , UpperCamelCase__=4 , UpperCamelCase__=37 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=10 , UpperCamelCase__=0.0_2 , UpperCamelCase__=3 , UpperCamelCase__=None , UpperCamelCase__=8 , UpperCamelCase__=10 , ):
A__ : Optional[int] = parent
A__ : List[Any] = batch_size
A__ : Dict = image_size
A__ : Any = patch_size
A__ : Dict = num_channels
A__ : List[Any] = is_training
A__ : int = use_labels
A__ : Any = hidden_size
A__ : List[str] = num_hidden_layers
A__ : Optional[int] = num_attention_heads
A__ : Optional[Any] = intermediate_size
A__ : str = hidden_act
A__ : str = hidden_dropout_prob
A__ : Optional[int] = attention_probs_dropout_prob
A__ : Optional[int] = type_sequence_label_size
A__ : Any = initializer_range
A__ : Optional[int] = num_labels
A__ : Union[str, Any] = scope
A__ : Union[str, Any] = n_targets
A__ : Dict = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
A__ : int = (image_size[1] // patch_size) * (image_size[0] // patch_size)
A__ : List[str] = num_patches + 1 + self.num_detection_tokens
def __snake_case ( self ):
A__ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
A__ : int = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
A__ : Tuple = []
for i in range(self.batch_size ):
A__ : List[Any] = {}
A__ : Tuple = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=UpperCamelCase__ )
A__ : Any = torch.rand(self.n_targets , 4 , device=UpperCamelCase__ )
labels.append(UpperCamelCase__ )
A__ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __snake_case ( self ):
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : Tuple = YolosModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A__ : Optional[Any] = model(UpperCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : Any = YolosForObjectDetection(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A__ : Union[str, Any] = model(pixel_values=UpperCamelCase__ )
A__ : Optional[int] = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
A__ : Union[str, Any] = model(pixel_values=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def __snake_case ( self ):
A__ : Optional[int] = self.prepare_config_and_inputs()
A__ , A__ , A__ : Optional[Any] = config_and_inputs
A__ : Optional[int] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
_lowerCAmelCase = (
{"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
)
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=False ):
A__ : Optional[int] = super()._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
A__ : str = []
for i in range(self.model_tester.batch_size ):
A__ : int = {}
A__ : Dict = torch.ones(
size=(self.model_tester.n_targets,) , device=UpperCamelCase__ , dtype=torch.long )
A__ : Dict = torch.ones(
self.model_tester.n_targets , 4 , device=UpperCamelCase__ , dtype=torch.float )
labels.append(UpperCamelCase__ )
A__ : Dict = labels
return inputs_dict
def __snake_case ( self ):
A__ : List[Any] = YolosModelTester(self )
A__ : List[str] = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 )
def __snake_case ( self ):
self.config_tester.run_common_tests()
def __snake_case ( self ):
# YOLOS does not use inputs_embeds
pass
def __snake_case ( self ):
A__ , A__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : Any = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A__ : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) )
def __snake_case ( self ):
A__ , A__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : List[str] = model_class(UpperCamelCase__ )
A__ : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ : Optional[int] = [*signature.parameters.keys()]
A__ : Optional[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def __snake_case ( self ):
A__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def __snake_case ( self ):
A__ , A__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
A__ : Tuple = True
# in YOLOS, the seq_len is different
A__ : List[Any] = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
A__ : Any = True
A__ : Optional[int] = False
A__ : Optional[Any] = True
A__ : int = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A__ : List[str] = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
A__ : Optional[int] = outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
            # check that output_attentions also works when set via the config
del inputs_dict["output_attentions"]
A__ : Tuple = True
A__ : Optional[Any] = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A__ : Tuple = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
A__ : Tuple = outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
A__ : List[Any] = len(UpperCamelCase__ )
# Check attention is always last and order is fine
A__ : List[str] = True
A__ : List[Any] = True
A__ : int = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A__ : Tuple = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
A__ : Tuple = 1
self.assertEqual(out_len + added_hidden_states , len(UpperCamelCase__ ) )
A__ : List[str] = outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def __snake_case ( self ):
def check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : str = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A__ : int = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
A__ : Optional[Any] = outputs.hidden_states
A__ : int = getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
# YOLOS has a different seq_length
A__ : Union[str, Any] = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
A__ , A__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : int = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
            # check that output_hidden_states also works when set via the config
del inputs_dict["output_hidden_states"]
A__ : Optional[int] = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
A__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*UpperCamelCase__ )
@slow
def __snake_case ( self ):
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : Union[str, Any] = YolosModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE ( ) -> List[str]:
"""simple docstring"""
A__ : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __snake_case ( self ):
return AutoImageProcessor.from_pretrained('''hustvl/yolos-small''' ) if is_vision_available() else None
@slow
def __snake_case ( self ):
A__ : Tuple = YolosForObjectDetection.from_pretrained('''hustvl/yolos-small''' ).to(UpperCamelCase__ )
A__ : str = self.default_image_processor
A__ : Tuple = prepare_img()
A__ : Tuple = image_processor(images=UpperCamelCase__ , return_tensors='''pt''' ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
A__ : Any = model(inputs.pixel_values )
# verify outputs
A__ : List[Any] = torch.Size((1, 100, 92) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
A__ : Optional[int] = torch.tensor(
[[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] , device=UpperCamelCase__ , )
A__ : Optional[int] = torch.tensor(
[[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] , device=UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , UpperCamelCase__ , atol=1e-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , UpperCamelCase__ , atol=1e-4 ) )
# verify postprocessing
A__ : Dict = image_processor.post_process_object_detection(
UpperCamelCase__ , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
A__ : int = torch.tensor([0.9_9_9_4, 0.9_7_9_0, 0.9_9_6_4, 0.9_9_7_2, 0.9_8_6_1] ).to(UpperCamelCase__ )
A__ : str = [75, 75, 17, 63, 17]
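        # these appear to be COCO category ids: 17 = cat, 63 = couch, 75 = remote
        # (the fixture image shows two cats on a couch with two remotes)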
A__ : Tuple = torch.tensor([3_3_5.0_6_0_9, 7_9.3_8_4_8, 3_7_5.4_2_1_6, 1_8_7.2_4_9_5] ).to(UpperCamelCase__ )
self.assertEqual(len(results['''scores'''] ) , 5 )
self.assertTrue(torch.allclose(results['''scores'''] , UpperCamelCase__ , atol=1e-4 ) )
self.assertSequenceEqual(results['''labels'''].tolist() , UpperCamelCase__ )
self.assertTrue(torch.allclose(results['''boxes'''][0, :] , UpperCamelCase__ ) )
| 55
|
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] , __UpperCamelCase : int=False ) -> Tuple:
"""simple docstring"""
try:
A__ : Dict = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
A__ : Tuple = default
else:
# KEY is set, convert it to True or False.
try:
A__ : Union[str, Any] = strtobool(__UpperCamelCase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F"If set, {key} must be yes or no." )
return _value
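# For example, running the suite with RUN_SLOW=yes in the environment makes the call below
# resolve to True, so the slow-test decorator further down stops skipping its tests.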
_SCREAMING_SNAKE_CASE : Union[str, Any] = parse_flag_from_env('RUN_SLOW', default=False)
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] ) -> Any:
"""simple docstring"""
return unittest.skip('''Test was skipped''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Tuple ) -> Union[str, Any]:
"""simple docstring"""
return unittest.skipUnless(_run_slow_tests , '''test is slow''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : str ) -> int:
"""simple docstring"""
return unittest.skipUnless(not torch.cuda.is_available() , '''test requires only a CPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] ) -> Tuple:
"""simple docstring"""
return unittest.skipUnless(torch.cuda.is_available() , '''test requires a GPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Dict ) -> List[str]:
"""simple docstring"""
return unittest.skipUnless(is_xpu_available() , '''test requires a XPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Dict ) -> Any:
"""simple docstring"""
return unittest.skipUnless(is_mps_available() , '''test requires a `mps` backend support in `torch`''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Optional[Any]:
"""simple docstring"""
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , '''test requires the Hugging Face suite''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Tuple ) -> Tuple:
"""simple docstring"""
return unittest.skipUnless(is_bnb_available() , '''test requires the bitsandbytes library''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] ) -> List[Any]:
"""simple docstring"""
return unittest.skipUnless(is_tpu_available() , '''test requires TPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Tuple:
"""simple docstring"""
return unittest.skipUnless(torch.cuda.device_count() == 1 , '''test requires a GPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Dict:
"""simple docstring"""
return unittest.skipUnless(torch.xpu.device_count() == 1 , '''test requires a XPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any ) -> str:
"""simple docstring"""
return unittest.skipUnless(torch.cuda.device_count() > 1 , '''test requires multiple GPUs''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Any:
"""simple docstring"""
return unittest.skipUnless(torch.xpu.device_count() > 1 , '''test requires multiple XPUs''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] ) -> int:
"""simple docstring"""
return unittest.skipUnless(is_safetensors_available() , '''test requires safetensors''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] ) -> Optional[Any]:
"""simple docstring"""
return unittest.skipUnless(is_deepspeed_available() , '''test requires DeepSpeed''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any ) -> List[Any]:
"""simple docstring"""
return unittest.skipUnless(is_torch_version('''>=''' , '''1.12.0''' ) , '''test requires torch version >= 1.12.0''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[int]=None , __UpperCamelCase : List[Any]=None ) -> Optional[Any]:
"""simple docstring"""
if test_case is None:
return partial(__UpperCamelCase , version=__UpperCamelCase )
return unittest.skipUnless(is_torch_version('''>=''' , __UpperCamelCase ) , F"test requires torch version >= {version}" )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
return unittest.skipUnless(is_tensorboard_available() , '''test requires Tensorboard''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any ) -> Tuple:
"""simple docstring"""
return unittest.skipUnless(is_wandb_available() , '''test requires wandb''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Tuple ) -> Any:
"""simple docstring"""
return unittest.skipUnless(is_comet_ml_available() , '''test requires comet_ml''' )(__UpperCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
return unittest.skipUnless(
_atleast_one_tracker_available , '''test requires at least one tracker to be available and for `comet_ml` to not be installed''' , )(__UpperCamelCase )
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = True
@classmethod
def __snake_case ( cls ):
A__ : Tuple = tempfile.mkdtemp()
@classmethod
def __snake_case ( cls ):
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def __snake_case ( self ):
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob('''**/*''' ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(UpperCamelCase__ )
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self ):
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self , UpperCamelCase__ ):
A__ : Tuple = mocks if isinstance(UpperCamelCase__ , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
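# The helper below gathers the given tensor from every process and returns True only if each
# gathered copy equals the local one, i.e. all ranks hold identical values.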
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : str ) -> Any:
"""simple docstring"""
A__ : int = AcceleratorState()
A__ : Any = tensor[None].clone().to(state.device )
A__ : Optional[int] = gather(__UpperCamelCase ).cpu()
A__ : Any = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , __UpperCamelCase ):
return False
return True
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : List[Any] = returncode
A__ : Union[str, Any] = stdout
A__ : Dict = stderr
async def SCREAMING_SNAKE_CASE ( __UpperCamelCase : str , __UpperCamelCase : Optional[Any] ) -> Any:
"""simple docstring"""
while True:
A__ : Tuple = await stream.readline()
if line:
callback(__UpperCamelCase )
else:
break
async def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[Any]=None , __UpperCamelCase : List[Any]=None , __UpperCamelCase : Tuple=None , __UpperCamelCase : Tuple=False , __UpperCamelCase : List[Any]=False ) -> _RunOutput:
"""simple docstring"""
if echo:
print('''\nRunning: ''' , ''' '''.join(__UpperCamelCase ) )
A__ : int = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=__UpperCamelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=__UpperCamelCase , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
    # If it starts hanging, we will need to switch to the following code. The problem is that
    # no data will be seen until the process is done; if it hangs, there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
A__ : List[Any] = []
A__ : str = []
def tee(__UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Dict , __UpperCamelCase : List[Any]="" ):
A__ : Optional[Any] = line.decode('''utf-8''' ).rstrip()
sink.append(__UpperCamelCase )
if not quiet:
print(__UpperCamelCase , __UpperCamelCase , file=__UpperCamelCase )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda __UpperCamelCase : tee(__UpperCamelCase , __UpperCamelCase , sys.stdout , label='''stdout:''' ) ) ),
asyncio.create_task(_read_stream(p.stderr , lambda __UpperCamelCase : tee(__UpperCamelCase , __UpperCamelCase , sys.stderr , label='''stderr:''' ) ) ),
] , timeout=__UpperCamelCase , )
return _RunOutput(await p.wait() , __UpperCamelCase , __UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] , __UpperCamelCase : Any=None , __UpperCamelCase : List[Any]=None , __UpperCamelCase : List[str]=1_80 , __UpperCamelCase : List[str]=False , __UpperCamelCase : Dict=True ) -> _RunOutput:
"""simple docstring"""
A__ : Dict = asyncio.get_event_loop()
A__ : Optional[Any] = loop.run_until_complete(
_stream_subprocess(__UpperCamelCase , env=__UpperCamelCase , stdin=__UpperCamelCase , timeout=__UpperCamelCase , quiet=__UpperCamelCase , echo=__UpperCamelCase ) )
A__ : Union[str, Any] = ''' '''.join(__UpperCamelCase )
if result.returncode > 0:
A__ : Optional[Any] = '''\n'''.join(result.stderr )
raise RuntimeError(
F"'{cmd_str}' failed with returncode {result.returncode}\n\n"
F"The combined stderr from workers follows:\n{stderr}" )
return result
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[str] , __UpperCamelCase : List[Any]=False ) -> Dict:
"""simple docstring"""
try:
A__ : List[Any] = subprocess.check_output(__UpperCamelCase , stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(__UpperCamelCase , '''decode''' ):
A__ : Any = output.decode('''utf-8''' )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
F"Command `{' '.join(__UpperCamelCase )}` failed with the following error:\n\n{e.output.decode()}" ) from e
| 55
| 1
|
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : List[Any] = {
'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = "detr"
_lowerCAmelCase = ["past_key_values"]
_lowerCAmelCase = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self , UpperCamelCase__=True , UpperCamelCase__=None , UpperCamelCase__=3 , UpperCamelCase__=100 , UpperCamelCase__=6 , UpperCamelCase__=2048 , UpperCamelCase__=8 , UpperCamelCase__=6 , UpperCamelCase__=2048 , UpperCamelCase__=8 , UpperCamelCase__=0.0 , UpperCamelCase__=0.0 , UpperCamelCase__=True , UpperCamelCase__="relu" , UpperCamelCase__=256 , UpperCamelCase__=0.1 , UpperCamelCase__=0.0 , UpperCamelCase__=0.0 , UpperCamelCase__=0.0_2 , UpperCamelCase__=1.0 , UpperCamelCase__=False , UpperCamelCase__="sine" , UpperCamelCase__="resnet50" , UpperCamelCase__=True , UpperCamelCase__=False , UpperCamelCase__=1 , UpperCamelCase__=5 , UpperCamelCase__=2 , UpperCamelCase__=1 , UpperCamelCase__=1 , UpperCamelCase__=5 , UpperCamelCase__=2 , UpperCamelCase__=0.1 , **UpperCamelCase__ , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
A__ : int = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A__ : Optional[Any] = backbone_config.get('''model_type''' )
A__ : int = CONFIG_MAPPING[backbone_model_type]
A__ : Optional[int] = config_class.from_dict(UpperCamelCase__ )
# set timm attributes to None
A__ , A__ , A__ : List[Any] = None, None, None
A__ : Any = use_timm_backbone
A__ : List[Any] = backbone_config
A__ : str = num_channels
A__ : Union[str, Any] = num_queries
A__ : Optional[Any] = d_model
A__ : List[Any] = encoder_ffn_dim
A__ : Dict = encoder_layers
A__ : int = encoder_attention_heads
A__ : int = decoder_ffn_dim
A__ : Tuple = decoder_layers
A__ : Tuple = decoder_attention_heads
A__ : Optional[Any] = dropout
A__ : str = attention_dropout
A__ : int = activation_dropout
A__ : List[Any] = activation_function
A__ : List[Any] = init_std
A__ : str = init_xavier_std
A__ : str = encoder_layerdrop
A__ : List[Any] = decoder_layerdrop
A__ : Tuple = encoder_layers
A__ : Dict = auxiliary_loss
A__ : List[str] = position_embedding_type
A__ : Tuple = backbone
A__ : List[str] = use_pretrained_backbone
A__ : List[str] = dilation
# Hungarian matcher
A__ : Tuple = class_cost
A__ : List[Any] = bbox_cost
A__ : List[str] = giou_cost
# Loss coefficients
A__ : int = mask_loss_coefficient
A__ : Tuple = dice_loss_coefficient
A__ : Optional[Any] = bbox_loss_coefficient
A__ : List[str] = giou_loss_coefficient
A__ : Optional[int] = eos_coefficient
super().__init__(is_encoder_decoder=UpperCamelCase__ , **UpperCamelCase__ )
@property
def __snake_case ( self ):
return self.encoder_attention_heads
@property
def __snake_case ( self ):
return self.d_model
@classmethod
def __snake_case ( cls , UpperCamelCase__ , **UpperCamelCase__ ):
return cls(backbone_config=UpperCamelCase__ , **UpperCamelCase__ )
def __snake_case ( self ):
A__ : str = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
A__ : Union[str, Any] = self.backbone_config.to_dict()
A__ : Any = self.__class__.model_type
return output
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = version.parse("1.11" )
@property
def __snake_case ( self ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
] )
@property
def __snake_case ( self ):
return 1e-5
@property
def __snake_case ( self ):
return 12
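# In the ONNX config above: the OrderedDict declares the model inputs and their dynamic axes,
# 1e-5 is the absolute tolerance used when validating the exported model against the original,
# and 12 is the default ONNX opset version to export with.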
| 55
|
import numpy as np
_SCREAMING_SNAKE_CASE : Any = [
['a', 'b', 'c', 'd', 'e'],
['f', 'g', 'h', 'i', 'k'],
['l', 'm', 'n', 'o', 'p'],
['q', 'r', 's', 't', 'u'],
['v', 'w', 'x', 'y', 'z'],
]
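# The class below appears to implement the Bifid cipher over the 5x5 Polybius square above
# (with 'j' folded into 'i'): each letter maps to its (row, column) coordinates, the row and
# column sequences are flattened into one stream, regrouped into fresh pairs, and mapped back
# through the square; decoding reverses the regrouping.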
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self ):
        A__ : List[Any] = np.array(_SCREAMING_SNAKE_CASE )
def __snake_case ( self , UpperCamelCase__ ):
A__ , A__ : Any = np.where(letter == self.SQUARE )
A__ : int = np.concatenate([indexa + 1, indexa + 1] )
return indexes
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ ):
A__ : Union[str, Any] = self.SQUARE[indexa - 1, indexa - 1]
return letter
def __snake_case ( self , UpperCamelCase__ ):
A__ : List[str] = message.lower()
A__ : str = message.replace(''' ''' , '''''' )
A__ : Union[str, Any] = message.replace('''j''' , '''i''' )
A__ : List[Any] = np.empty((2, len(UpperCamelCase__ )) )
for letter_index in range(len(UpperCamelCase__ ) ):
A__ : Any = self.letter_to_numbers(message[letter_index] )
A__ : Optional[Any] = numbers[0]
A__ : List[str] = numbers[1]
A__ : List[str] = first_step.reshape(2 * len(UpperCamelCase__ ) )
A__ : List[Any] = ''''''
for numbers_index in range(len(UpperCamelCase__ ) ):
A__ : Dict = int(second_step[numbers_index * 2] )
A__ : List[str] = int(second_step[(numbers_index * 2) + 1] )
A__ : Dict = self.numbers_to_letter(UpperCamelCase__ , UpperCamelCase__ )
A__ : Tuple = encoded_message + letter
return encoded_message
def __snake_case ( self , UpperCamelCase__ ):
A__ : str = message.lower()
        A__ : str = message.replace(''' ''' , '''''' )
A__ : List[Any] = np.empty(2 * len(UpperCamelCase__ ) )
for letter_index in range(len(UpperCamelCase__ ) ):
A__ : List[str] = self.letter_to_numbers(message[letter_index] )
A__ : Dict = numbers[0]
A__ : int = numbers[1]
A__ : Optional[Any] = first_step.reshape((2, len(UpperCamelCase__ )) )
A__ : int = ''''''
for numbers_index in range(len(UpperCamelCase__ ) ):
A__ : Tuple = int(second_step[0, numbers_index] )
A__ : Dict = int(second_step[1, numbers_index] )
A__ : List[str] = self.numbers_to_letter(UpperCamelCase__ , UpperCamelCase__ )
A__ : Tuple = decoded_message + letter
return decoded_message
| 55
| 1
|
import qiskit
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int , __UpperCamelCase : int ) -> qiskit.result.counts.Counts:
"""simple docstring"""
A__ : str = qiskit.Aer.get_backend('''aer_simulator''' )
# Create a Quantum Circuit acting on the q register
A__ : int = qiskit.QuantumCircuit(__UpperCamelCase , __UpperCamelCase )
# Apply X (NOT) Gate to Qubits 0 & 1
circuit.x(0 )
circuit.x(1 )
# Map the quantum measurement to the classical bits
circuit.measure([0, 1] , [0, 1] )
# Execute the circuit on the qasm simulator
A__ : List[str] = qiskit.execute(__UpperCamelCase , __UpperCamelCase , shots=10_00 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(__UpperCamelCase )
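# Since both qubits are deterministically flipped by the X gates, the expected histogram puts
# every shot in the |11> state, e.g. {'11': 1000} for the 1000 shots above (assuming an ideal,
# noise-free simulator).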
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Tuple = single_qubit_measure(2, 2)
print(f"""Total count for various states are: {counts}""")
| 55
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 55
| 1
|
from bisect import bisect
from itertools import accumulate
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : str , __UpperCamelCase : Optional[Any] , __UpperCamelCase : int , __UpperCamelCase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
    A__ : Tuple = sorted(zip(__UpperCamelCase , __UpperCamelCase ) , key=lambda x : x[0] / x[1] , reverse=__UpperCamelCase )
A__ , A__ : Dict = [i[0] for i in r], [i[1] for i in r]
A__ : Any = list(accumulate(__UpperCamelCase ) )
A__ : Any = bisect(__UpperCamelCase , __UpperCamelCase )
return (
0
if k == 0
else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
if k != n
else sum(vl[:k] )
)
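# A worked example, assuming the intended (de-obfuscated) logic: with values [60, 100, 120],
# weights [10, 20, 30], capacity w = 50 and n = 3, items are taken greedily by value/weight
# ratio: the first two whole plus 20/30 of the third, giving 60 + 100 + 120 * 20 / 30 = 240.0.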
if __name__ == "__main__":
import doctest
doctest.testmod()
| 55
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_SCREAMING_SNAKE_CASE : str = 1_6
_SCREAMING_SNAKE_CASE : Tuple = 3_2
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Accelerator , __UpperCamelCase : int = 16 ) -> Optional[int]:
"""simple docstring"""
A__ : List[str] = AutoTokenizer.from_pretrained('''bert-base-cased''' )
A__ : Optional[int] = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(__UpperCamelCase : Union[str, Any] ):
# max_length=None => use the model max length (it's actually the default)
A__ : int = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__UpperCamelCase , max_length=__UpperCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
A__ : Optional[int] = datasets.map(
__UpperCamelCase , batched=__UpperCamelCase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
A__ : List[Any] = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(__UpperCamelCase : Any ):
# On TPU it's best to pad everything to the same length or training will be very slow.
A__ : Optional[Any] = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
A__ : int = 16
elif accelerator.mixed_precision != "no":
A__ : Any = 8
else:
A__ : Union[str, Any] = None
return tokenizer.pad(
__UpperCamelCase , padding='''longest''' , max_length=__UpperCamelCase , pad_to_multiple_of=__UpperCamelCase , return_tensors='''pt''' , )
# Instantiate dataloaders.
A__ : Optional[int] = DataLoader(
tokenized_datasets['''train'''] , shuffle=__UpperCamelCase , collate_fn=__UpperCamelCase , batch_size=__UpperCamelCase )
A__ : Tuple = DataLoader(
tokenized_datasets['''validation'''] , shuffle=__UpperCamelCase , collate_fn=__UpperCamelCase , batch_size=__UpperCamelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_SCREAMING_SNAKE_CASE : Dict = mocked_dataloaders # noqa: F811
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int , __UpperCamelCase : List[Any] ) -> Optional[Any]:
"""simple docstring"""
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , __UpperCamelCase ) == "1":
A__ : List[str] = 2
# Initialize accelerator
A__ : Optional[Any] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
A__ : Tuple = config['''lr''']
A__ : Dict = int(config['''num_epochs'''] )
A__ : int = int(config['''seed'''] )
A__ : Optional[Any] = int(config['''batch_size'''] )
A__ : int = evaluate.load('''glue''' , '''mrpc''' )
# If the batch size is too big we use gradient accumulation
A__ : Union[str, Any] = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
A__ : List[Any] = batch_size // MAX_GPU_BATCH_SIZE
A__ : Dict = MAX_GPU_BATCH_SIZE
set_seed(__UpperCamelCase )
A__ , A__ : int = get_dataloaders(__UpperCamelCase , __UpperCamelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
A__ : Optional[int] = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=__UpperCamelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
A__ : Tuple = model.to(accelerator.device )
# Instantiate optimizer
A__ : Optional[int] = AdamW(params=model.parameters() , lr=__UpperCamelCase )
# Instantiate scheduler
A__ : Any = get_linear_schedule_with_warmup(
optimizer=__UpperCamelCase , num_warmup_steps=1_00 , num_training_steps=(len(__UpperCamelCase ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
A__ , A__ , A__ , A__ , A__ : Dict = accelerator.prepare(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather((predictions, batch["labels"]))
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
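# A sketch (not part of the original script) of the `gather_for_metrics`
# shortcut mentioned in the comments above: it truncates the duplicated
# samples of the last distributed batch automatically, so the manual
# `samples_seen` bookkeeping disappears.
#
#     for step, batch in enumerate(eval_dataloader):
#         with torch.no_grad():
#             outputs = model(**batch)
#         predictions = outputs.logits.argmax(dim=-1)
#         predictions, references = accelerator.gather_for_metrics(
#             (predictions, batch["labels"])
#         )
#         metric.add_batch(predictions=predictions, references=references)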
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). "
        "Bf16 requires PyTorch >= 1.10 and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
from __future__ import annotations
import math
import random
from typing import Any
class MyQueue:
    """A simple FIFO queue used for the level-order traversal in ``AVLtree.__str__``."""

    def __init__(self):
        self.data: list[Any] = []
        self.head: int = 0
        self.tail: int = 0

    def is_empty(self) -> bool:
        return self.head == self.tail

    def push(self, data: Any) -> None:
        self.data.append(data)
        self.tail = self.tail + 1

    def pop(self) -> Any:
        ret = self.data[self.head]
        self.head = self.head + 1
        return ret

    def count(self) -> int:
        return self.tail - self.head

    def print_queue(self) -> None:
        print(self.data)
        print("**************")
        print(self.data[self.head : self.tail])
class MyNode:
    """A node of the AVL tree, storing its data, children and height."""

    def __init__(self, data: Any):
        self.data = data
        self.left: MyNode | None = None
        self.right: MyNode | None = None
        self.height: int = 1

    def get_data(self) -> Any:
        return self.data

    def get_left(self) -> MyNode | None:
        return self.left

    def get_right(self) -> MyNode | None:
        return self.right

    def get_height(self) -> int:
        return self.height

    def set_data(self, data: Any) -> None:
        self.data = data

    def set_left(self, node: MyNode | None) -> None:
        self.left = node

    def set_right(self, node: MyNode | None) -> None:
        self.right = node

    def set_height(self, height: int) -> None:
        self.height = height
def get_height(node: MyNode | None) -> int:
    """Height of a (possibly empty) subtree; an empty subtree has height 0."""
    if node is None:
        return 0
    return node.get_height()


def my_max(a: int, b: int) -> int:
    """Return the larger of two integers."""
    if a > b:
        return a
    return b
def right_rotation(node: MyNode) -> MyNode:
    """Rotate the subtree rooted at ``node`` to the right (used when it is left-heavy)."""
    print("right rotation node:", node.get_data())
    ret = node.get_left()
    assert ret is not None
    node.set_left(ret.get_right())
    ret.set_right(node)
    h = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h)
    h = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h)
    return ret
def left_rotation(node: MyNode) -> MyNode:
    """Rotate the subtree rooted at ``node`` to the left (used when it is right-heavy)."""
    print("left rotation node:", node.get_data())
    ret = node.get_right()
    assert ret is not None
    node.set_right(ret.get_left())
    ret.set_left(node)
    h = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h)
    h = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h)
    return ret
def lr_rotation(node: MyNode) -> MyNode:
    """Left-right case: left-rotate the left child, then right-rotate the node."""
    left_child = node.get_left()
    assert left_child is not None
    node.set_left(left_rotation(left_child))
    return right_rotation(node)


def rl_rotation(node: MyNode) -> MyNode:
    """Right-left case: right-rotate the right child, then left-rotate the node."""
    right_child = node.get_right()
    assert right_child is not None
    node.set_right(right_rotation(right_child))
    return left_rotation(node)
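# A sketch (reconstructed from the usual AVL convention; not in the original
# file) of what a simple rotation does. Here B is pulled up by a right
# rotation; a left rotation is the mirror image, pulling the right child up:
#
#             A                    B
#            / \                  / \
#           B   C     -->        Bl  A
#          / \                      / \
#         Bl  Br                   Br  C
#
# lr_rotation/rl_rotation handle the zig-zag cases by first straightening the
# heavy child with one rotation and then rotating the node itself.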
def insert_node(node: MyNode | None, data: Any) -> MyNode | None:
    """Insert ``data`` into the subtree rooted at ``node`` and rebalance it."""
    if node is None:
        return MyNode(data)
    if data < node.get_data():
        node.set_left(insert_node(node.get_left(), data))
        if (
            get_height(node.get_left()) - get_height(node.get_right()) == 2
        ):  # an unbalance detected
            left_child = node.get_left()
            assert left_child is not None
            if (
                data < left_child.get_data()
            ):  # new node is the left child of the left child
                node = right_rotation(node)
            else:
                node = lr_rotation(node)
    else:
        node.set_right(insert_node(node.get_right(), data))
        if get_height(node.get_right()) - get_height(node.get_left()) == 2:
            right_child = node.get_right()
            assert right_child is not None
            if data < right_child.get_data():
                node = rl_rotation(node)
            else:
                node = left_rotation(node)
    h = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h)
    return node
def get_right_most(root: MyNode) -> Any:
    """Return the largest value stored in the subtree rooted at ``root``."""
    while True:
        right_child = root.get_right()
        if right_child is None:
            break
        root = right_child
    return root.get_data()


def get_left_most(root: MyNode) -> Any:
    """Return the smallest value stored in the subtree rooted at ``root``."""
    while True:
        left_child = root.get_left()
        if left_child is None:
            break
        root = left_child
    return root.get_data()
def del_node(root: MyNode, data: Any) -> MyNode | None:
    """Delete ``data`` from the subtree rooted at ``root`` and rebalance it."""
    left_child = root.get_left()
    right_child = root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            temp_data = get_left_most(right_child)
            root.set_data(temp_data)
            root.set_right(del_node(right_child, temp_data))
        elif left_child is not None:
            root = left_child
        elif right_child is not None:
            root = right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print("No such data")
            return root
        else:
            root.set_left(del_node(left_child, data))
    else:  # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(del_node(right_child, data))

    # Rebalance if the deletion left the tree height-unbalanced.
    if get_height(right_child) - get_height(left_child) == 2:
        assert right_child is not None
        if get_height(right_child.get_right()) > get_height(right_child.get_left()):
            root = left_rotation(root)
        else:
            root = rl_rotation(root)
    elif get_height(right_child) - get_height(left_child) == -2:
        assert left_child is not None
        if get_height(left_child.get_left()) > get_height(left_child.get_right()):
            root = right_rotation(root)
        else:
            root = lr_rotation(root)
    height = my_max(get_height(root.get_right()), get_height(root.get_left())) + 1
    root.set_height(height)
    return root
class AVLtree:
    """An AVL tree: a self-balancing binary search tree."""

    def __init__(self):
        self.root: MyNode | None = None

    def get_height(self) -> int:
        return get_height(self.root)

    def insert(self, data: Any) -> None:
        print("insert:" + str(data))
        self.root = insert_node(self.root, data)

    def del_node(self, data: Any) -> None:
        print("delete:" + str(data))
        if self.root is None:
            print("Tree is empty!")
            return
        self.root = del_node(self.root, data)
    def __str__(self):  # a level-order traversal gives a more intuitive look at the tree
        output = ""
        q = MyQueue()
        q.push(self.root)
        layer = self.get_height()
        if layer == 0:
            return output
        cnt = 0
        while not q.is_empty():
            node = q.pop()
            space = " " * int(math.pow(2, layer - 1))
            output += space
            if node is None:
                output += "*"
                q.push(None)
                q.push(None)
            else:
                output += str(node.get_data())
                q.push(node.get_left())
                q.push(node.get_right())
            output += space
            cnt = cnt + 1
            for i in range(100):
                if cnt == math.pow(2, layer) - 1:
                    layer = layer - 1
                    if layer == 0:
                        output += "\n*************************************"
                        return output
                    output += "\n"
                    break
        output += "\n*************************************"
        return output
def _test() -> None:
    import doctest

    doctest.testmod()
if __name__ == "__main__":
_test()
    t = AVLtree()
    lst = list(range(10))
random.shuffle(lst)
for i in lst:
t.insert(i)
print(str(t))
random.shuffle(lst)
for i in lst:
t.del_node(i)
print(str(t))
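# Note (added): the rotations keep the heights of any two sibling subtrees
# within 1 of each other, so the tree height stays O(log n) and insert,
# delete and search all run in O(log n) time.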
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool(PipelineTool):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan

    inputs = ["text"]
    outputs = ["audio"]
    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()
    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)
        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")
            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)
        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
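# A minimal usage sketch (an assumption; the agent machinery that normally
# drives this tool is not shown in this file):
#
#     tool = TextToSpeechTool()
#     tool.setup()
#     audio = tool("Hello world")  # PipelineTool.__call__ chains encode/forward/decode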
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset(IterableDataset):
    """Iterable dataset that yields constant-length chunks of token ids from a stream of text."""
    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences
    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)
def create_dataloader(args):
    ds_kwargs = {"streaming": True}
    valid_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader
def evaluate(args):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()
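# Note (added): perplexity is exp(mean cross-entropy loss). With `torch.exp`
# an overflow simply yields `inf` rather than raising, so the OverflowError
# fallback above is mostly defensive (it mirrors `math.exp` behaviour).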
# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}
class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Builds the input ids for a conversation (DialoGPT-style chaining of turns)."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
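# A minimal usage sketch (assumes access to the Hugging Face Hub):
#
#     tokenizer = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
#     input_ids = tokenizer("Hello world")["input_ids"]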
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"
    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
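# A usage sketch (the column names "query"/"passage" are hypothetical;
# `prepare_for_task` is the datasets API that consumes task templates):
#
#     task = QuestionAnsweringExtractive(question_column="query", context_column="passage")
#     ds = ds.prepare_for_task(task)  # renames/casts columns to question/context/answers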
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
    # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig):
    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1024,
        encoder_layers=6,
        encoder_ffn_dim=1024,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=300,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
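# A minimal instantiation sketch (building a config alone downloads no
# pretrained weights):
#
#     config = DeformableDetrConfig(num_queries=100, two_stage=False)
#     assert config.hidden_size == config.d_model  # resolved via `attribute_map`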
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def create_components():
    model = torch.nn.Linear(2, 4)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1)
    train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3])))
    valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6])))

    return model, optimizer, scheduler, train_dl, valid_dl
def get_signature(model):
    return (model.weight.abs().sum() + model.bias.abs().sum()).item()


def load_random_weights(model):
    state = torch.nn.Linear(*tuple(model.weight.T.shape)).state_dict()
    model.load_state_dict(state)
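# Note (added): the "signature" above is just the L1 norm of the layer's
# parameters, a cheap fingerprint used below to check whether a save/load
# round trip restored the weights that `load_random_weights` scrambled.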
class AcceleratorTester(AccelerateTestCase):
    @require_cuda
    def test_cpu_flag_conflicts_with_cuda_state(self):
        accelerator = Accelerator()
        assert PartialState._shared_state["_cpu"] is False
        assert PartialState._shared_state["device"].type == "cuda"
        with self.assertRaises(ValueError):
            _ = Accelerator(cpu=True)
    def test_mutable_states(self):
        accelerator = Accelerator()
        state = GradientState()
        assert state.num_steps == 1
        accelerator.gradient_accumulation_steps = 4
        assert state.num_steps == 4

        assert state.sync_gradients is True
        accelerator.sync_gradients = False
        assert state.sync_gradients is False
        GradientState._reset_state()
    def test_prepared_objects_are_referenced(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()

        (
            prepared_model,
            prepared_optimizer,
            prepared_scheduler,
            prepared_train_dl,
            prepared_valid_dl,
        ) = accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        self.assertTrue(prepared_model in accelerator._models)
        self.assertTrue(prepared_optimizer in accelerator._optimizers)
        self.assertTrue(prepared_scheduler in accelerator._schedulers)
        self.assertTrue(prepared_train_dl in accelerator._dataloaders)
        self.assertTrue(prepared_valid_dl in accelerator._dataloaders)
    def test_free_memory_dereferences_prepared_components(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
        accelerator.free_memory()

        self.assertTrue(len(accelerator._models) == 0)
        self.assertTrue(len(accelerator._optimizers) == 0)
        self.assertTrue(len(accelerator._schedulers) == 0)
        self.assertTrue(len(accelerator._dataloaders) == 0)
    def test_env_var_device(self):
        """Tests that setting ACCELERATE_TORCH_DEVICE overrides the default device."""
        PartialState._reset_state()

        # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
        def noop(*args, **kwargs):
            pass

        with patch("torch.cuda.set_device", noop), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64"):
            accelerator = Accelerator()
            self.assertEqual(str(accelerator.state.device), "cuda:64")
    def test_save_load_model(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        model_signature = get_signature(model)

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # make sure loaded weights match
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)
    def test_save_load_model_with_hooks(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        model_signature = get_signature(model)

        # saving hook
        def save_config(models, weights, output_dir):
            config = {"class_name": models[0].__class__.__name__}
            with open(os.path.join(output_dir, "data.json"), "w") as f:
                json.dump(config, f)

        # loading hook
        def load_config(models, input_dir):
            with open(os.path.join(input_dir, "data.json"), "r") as f:
                config = json.load(f)
            models[0].class_name = config["class_name"]

        save_hook = accelerator.register_save_state_pre_hook(save_config)
        load_hook = accelerator.register_load_state_pre_hook(load_config)

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match with hooks
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # random class name to verify correct one is loaded
            model.class_name = "random"

            # make sure loaded weights match with hooks
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

            # model.class_name is loaded from config
            self.assertTrue(model.class_name == model.__class__.__name__)

        # remove hooks
        save_hook.remove()
        load_hook.remove()

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match with hooks removed
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # random class name to verify correct one is loaded
            model.class_name = "random"

            # make sure loaded weights match with hooks removed
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

            # model.class_name is NOT loaded from config
            self.assertTrue(model.class_name != model.__class__.__name__)
    def test_accelerator_none(self):
        """Just test that passing None to accelerator.prepare() works."""
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = None

        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model, optimizer, scheduler, train_dl, valid_dl, dummy_obj
        )
        self.assertTrue(dummy_obj is None)
    def test_is_accelerator_prepared(self):
        """Checks that `_is_accelerate_prepared` is set properly on prepared objects."""
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = [1, 2, 3]

        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model, optimizer, scheduler, train_dl, valid_dl, dummy_obj
        )
        self.assertEqual(
            getattr(dummy_obj, "_is_accelerate_prepared", False),
            False,
            "Dummy object should not have `_is_accelerate_prepared` set",
        )
        self.assertEqual(
            getattr(model, "_is_accelerate_prepared", False),
            True,
            "Model is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(optimizer, "_is_accelerate_prepared", False),
            True,
            "Optimizer is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(scheduler, "_is_accelerate_prepared", False),
            True,
            "Scheduler is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(train_dl, "_is_accelerate_prepared", False),
            True,
            "Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(valid_dl, "_is_accelerate_prepared", False),
            True,
            "Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`",
        )
    @slow
    @require_bnb
    def test_accelerator_bnb(self):
        """Tests that the accelerator can be used with a bitsandbytes 8-bit model."""
        from transformers import AutoModelForCausalLM

        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map={"": 0},
        )
        accelerator = Accelerator()

        # This should work
        model = accelerator.prepare(model)
    @slow
    @require_bnb
    def test_accelerator_bnb_cpu_error(self):
        """Tests that preparing a CPU-offloaded 8-bit model raises an error."""
        from transformers import AutoModelForCausalLM

        accelerator = Accelerator()
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = "cpu"
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m", device_map=device_map, load_in_8bit=True, llm_int8_enable_fp32_cpu_offload=True
        )

        # This should not work and get value error
        with self.assertRaises(ValueError):
            model = accelerator.prepare(model)
    @slow
    @require_bnb
    @require_multi_gpu
    def test_accelerator_bnb_multi_gpu(self):
        """Tests that preparing an already device-mapped 8-bit model errors out in distributed mode."""
        from transformers import AutoModelForCausalLM

        PartialState._shared_state = {"distributed_type": DistributedType.MULTI_GPU}
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = 1
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map=device_map,
        )
        accelerator = Accelerator()

        # This should not work and get value error
        with self.assertRaises(ValueError):
            _ = accelerator.prepare(model)

        PartialState._reset_state()
    @slow
    @require_bnb
    @require_multi_gpu
    def test_accelerator_bnb_multi_gpu_no_distributed(self):
        from transformers import AutoModelForCausalLM

        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = 1
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map=device_map,
        )
        accelerator = Accelerator()

        # This should work
        model = accelerator.prepare(model)
    @require_cuda
    def test_accelerator_cpu_flag_prepare(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
        accelerator = Accelerator(cpu=True)
        _ = accelerator.prepare(optimizer)
def count_inversions_bf(arr: list) -> int:
    """Count inversions by comparing every pair: O(n^2)."""
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions
def count_inversions_recursive(arr: list) -> tuple:
    """Count inversions with a merge-sort style divide and conquer: O(n log n)."""
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]
    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)

    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions
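# Note (added): the divide-and-conquer count satisfies T(n) = 2*T(n/2) + O(n),
# i.e. O(n log n), versus O(n^2) for the brute-force version above.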
def _count_cross_inversions(p: list, q: list) -> tuple:
    """Merge two sorted lists while counting inversions that cross the halves."""
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1

    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])

    return r, num_inversion
def main() -> None:
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)

    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)
if __name__ == "__main__":
main()
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nezha"] = [
        "NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NezhaForNextSentencePrediction",
        "NezhaForMaskedLM",
        "NezhaForPreTraining",
        "NezhaForMultipleChoice",
        "NezhaForQuestionAnswering",
        "NezhaForSequenceClassification",
        "NezhaForTokenClassification",
        "NezhaModel",
        "NezhaPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    """Change the brightness of a PIL Image by shifting every channel value by ``level``."""

    def brightness(c: int) -> float:
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)
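# Note (added): for 8-bit images, `Image.point` clamps the mapped values to
# the 0-255 range, so a level that pushes a channel past either bound simply
# saturates it.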
if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        bright_img = change_brightness(img, 100)
        bright_img.save("image_data/lena_brightness.png", format="png")
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('0.12.2'):
raise Exception('requires fairseq >= 0.12.2')
if version.parse(fairseq.__version__) > version.parse('2'):
raise Exception('requires fairseq < v2')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello, World!"
SAMPLE_LANGUAGE = "en_XX"
def convert_xmod_checkpoint_to_pytorch(
    xmod_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
) -> None:
    data_dir = Path("data_bin")
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent),
        checkpoint_file=Path(xmod_checkpoint_path).name,
        _name="xmod_base",
        arch="xmod_base",
        task="multilingual_masked_lm",
        data_name_or_path=str(data_dir),
        bpe="sentencepiece",
        sentencepiece_model=str(Path(xmod_checkpoint_path).parent / "sentencepiece.bpe.model"),
        src_dict=str(data_dir / "dict.txt"),
    )
    xmod.eval()  # disable dropout
    print(xmod)

    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=xmod.cfg.model.encoder_embed_dim,
        num_hidden_layers=xmod.cfg.model.encoder_layers,
        num_attention_heads=xmod.cfg.model.encoder_attention_heads,
        intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
        pre_norm=xmod.cfg.model.encoder_normalize_before,
        adapter_reduction_factor=getattr(xmod.cfg.model, "bottleneck", 2),
        adapter_layer_norm=xmod.cfg.model.adapter_layer_norm,
        adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm,
        ln_before_adapter=xmod.cfg.model.ln_before_adapter,
        languages=xmod.cfg.model.languages,
    )
    if classification_head:
        config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our X-MOD config:", config)

    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
    model.eval()
    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c xmod doesn't use them.
    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]

        # self attention
        self_attn = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        ):
            raise AssertionError("Dimensions of self-attention weights do not match.")

        self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias

        # self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError("Dimensions of self-attention output weights do not match.")
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias
        # intermediate
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError("Dimensions of intermediate weights do not match.")
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias

        # output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError("Dimensions of feed-forward weights do not match.")
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias
        if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
            raise AssertionError("Lists of language adapters do not match.")
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.dense1.weight = from_adapter.fc1.weight
            to_adapter.dense1.bias = from_adapter.fc1.bias
            to_adapter.dense2.weight = from_adapter.fc2.weight
            to_adapter.dense2.bias = from_adapter.fc2.bias
        # end of layer

    if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias

    if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias
    # Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xmod_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
    args = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class FeatureExtractionSavingTestMixin:
    feature_extraction_class = None

    def test_feat_extract_to_json_string(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)
    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())
    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())
    def test_init_without_params(self):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset"
        )
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples
    def run_pipeline_test(self, video_classifier, examples):
        for example in examples:
            outputs = video_classifier(example)

            self.assertEqual(
                outputs,
                [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ],
            )
    @require_torch
    def test_small_model_pt(self):
        small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10}
        )
        video_classifier = pipeline(
            "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4
        )

        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
        )

        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
            ],
        )
    @require_tf
    def test_small_model_tf(self):
        pass
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_SCREAMING_SNAKE_CASE : Union[str, Any] = '\\n@inproceedings{snover-etal-2006-study,\n title = "A Study of Translation Edit Rate with Targeted Human Annotation",\n author = "Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John",\n booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",\n month = aug # " 8-12",\n year = "2006",\n address = "Cambridge, Massachusetts, USA",\n publisher = "Association for Machine Translation in the Americas",\n url = "https://aclanthology.org/2006.amta-papers.25",\n pages = "223--231",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_SCREAMING_SNAKE_CASE : Tuple = '\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n'
_SCREAMING_SNAKE_CASE : Optional[Any] = '\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n \'score\' (float): TER score (num_edits / sum_ref_lengths * 100)\n \'num_edits\' (int): The cumulative number of edits\n \'ref_length\' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?",\n ... "What did the TER metric user say to the developer?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n ... ["Your jokes are...", "...TERrible"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}\n\n Example 2:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}\n\n Example 3:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}\n\n Example 4:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}\n\n Example 5:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?",\n ... "What did the TER metric user say to the developer?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n ... 
["Your jokes are...", "...TERrible"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class Ter(datasets.Metric):
    def _info(self):
if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install "sacrebleu>=1.4.12"`.''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''http://www.cs.umd.edu/~snover/tercom/''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#ter'''] , reference_urls=[
'''https://github.com/jhclark/tercom''',
] , )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = False , UpperCamelCase__ = False , UpperCamelCase__ = False , UpperCamelCase__ = False , ):
A__ : List[Any] = len(references[0] )
if any(len(UpperCamelCase__ ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
A__ : Dict = [[refs[i] for refs in references] for i in range(UpperCamelCase__ )]
A__ : Optional[Any] = TER(
normalized=UpperCamelCase__ , no_punct=UpperCamelCase__ , asian_support=UpperCamelCase__ , case_sensitive=UpperCamelCase__ , )
A__ : str = sb_ter.corpus_score(UpperCamelCase__ , UpperCamelCase__ )
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
import random
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Dict , __UpperCamelCase : str , __UpperCamelCase : str ) -> List[Any]:
"""simple docstring"""
A__ : Optional[Any] = a[left_index]
A__ : Any = left_index + 1
for j in range(left_index + 1 , __UpperCamelCase ):
if a[j] < pivot:
A__ , A__ : Dict = a[i], a[j]
i += 1
A__ , A__ : int = a[i - 1], a[left_index]
return i - 1
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Dict , __UpperCamelCase : int , __UpperCamelCase : str ) -> str:
"""simple docstring"""
if left < right:
A__ : str = random.randint(__UpperCamelCase , right - 1 )
A__ , A__ : int = (
a[left],
a[pivot],
) # switches the pivot with the left most bound
A__ : Union[str, Any] = partition(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
quick_sort_random(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) # recursive quicksort to the left of the pivot point
quick_sort_random(
__UpperCamelCase , pivot_index + 1 , __UpperCamelCase ) # recursive quicksort to the right of the pivot point
def SCREAMING_SNAKE_CASE ( ) -> List[Any]:
"""simple docstring"""
A__ : Optional[int] = input('''Enter numbers separated by a comma:\n''' ).strip()
A__ : str = [int(__UpperCamelCase ) for item in user_input.split(''',''' )]
quick_sort_random(__UpperCamelCase , 0 , len(__UpperCamelCase ) )
print(__UpperCamelCase )
if __name__ == "__main__":
main()
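# Minimal non-interactive usage sketch. The helper names are obfuscated in this dump,
# but the recursive calls above assume they resolve to `quick_sort_random` and
# `partition`; note the right bound is exclusive, as in the main() call:
#
#   >>> data = [5, 2, 9, 1]
#   >>> quick_sort_random(data, 0, len(data))
#   >>> data
#   [1, 2, 5, 9]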
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
# TODO Update this
_SCREAMING_SNAKE_CASE : Optional[int] = {
'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = "esm"
def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=768 , UpperCamelCase__=12 , UpperCamelCase__=12 , UpperCamelCase__=3072 , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=1026 , UpperCamelCase__=0.0_2 , UpperCamelCase__=1e-12 , UpperCamelCase__="absolute" , UpperCamelCase__=True , UpperCamelCase__=None , UpperCamelCase__=False , UpperCamelCase__=False , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ , ):
super().__init__(pad_token_id=UpperCamelCase__ , mask_token_id=UpperCamelCase__ , **UpperCamelCase__ )
A__ : Optional[Any] = vocab_size
A__ : int = hidden_size
A__ : List[str] = num_hidden_layers
A__ : Tuple = num_attention_heads
A__ : str = intermediate_size
A__ : List[str] = hidden_dropout_prob
A__ : Optional[Any] = attention_probs_dropout_prob
A__ : int = max_position_embeddings
A__ : List[str] = initializer_range
A__ : List[Any] = layer_norm_eps
A__ : int = position_embedding_type
A__ : Optional[Any] = use_cache
A__ : Optional[int] = emb_layer_norm_before
A__ : List[str] = token_dropout
A__ : Tuple = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info('''No esmfold_config supplied for folding model, using default values.''' )
A__ : List[Any] = EsmFoldConfig()
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A__ : Optional[int] = EsmFoldConfig(**UpperCamelCase__ )
A__ : int = esmfold_config
if vocab_list is None:
logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' )
A__ : Any = get_default_vocab_list()
else:
A__ : Dict = vocab_list
else:
A__ : Optional[Any] = None
A__ : Tuple = None
if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , UpperCamelCase__ ):
raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' )
def __snake_case ( self ):
A__ : Optional[int] = super().to_dict()
if isinstance(self.esmfold_config , UpperCamelCase__ ):
A__ : Dict = self.esmfold_config.to_dict()
return output
@dataclass
class UpperCamelCase__ :
'''simple docstring'''
_lowerCAmelCase = None
_lowerCAmelCase = True
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = 0
_lowerCAmelCase = True
_lowerCAmelCase = False
_lowerCAmelCase = 128
_lowerCAmelCase = None
def __snake_case ( self ):
if self.trunk is None:
A__ : Tuple = TrunkConfig()
elif isinstance(self.trunk , UpperCamelCase__ ):
A__ : List[Any] = TrunkConfig(**self.trunk )
def __snake_case ( self ):
A__ : Optional[int] = asdict(self )
A__ : int = self.trunk.to_dict()
return output
@dataclass
class UpperCamelCase__ :
'''simple docstring'''
_lowerCAmelCase = 48
_lowerCAmelCase = 1_024
_lowerCAmelCase = 128
_lowerCAmelCase = 32
_lowerCAmelCase = 32
_lowerCAmelCase = 32
_lowerCAmelCase = 0
_lowerCAmelCase = 0
_lowerCAmelCase = False
_lowerCAmelCase = 4
_lowerCAmelCase = 128
_lowerCAmelCase = None
def __snake_case ( self ):
if self.structure_module is None:
A__ : str = StructureModuleConfig()
elif isinstance(self.structure_module , UpperCamelCase__ ):
A__ : str = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(F"`max_recycles` should be positive, got {self.max_recycles}." )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                '''`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'''
                F" {self.sequence_state_dim} and {self.sequence_head_width}." )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                '''`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'''
                F" {self.pairwise_state_dim} and {self.pairwise_head_width}." )
A__ : Tuple = self.sequence_state_dim // self.sequence_head_width
A__ : int = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
'''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got'''
F" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}." )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
'''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got'''
F" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}." )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}." )
if self.dropout >= 0.4:
raise ValueError(F"`dropout` should not be greater than 0.4, got {self.dropout}." )
def __snake_case ( self ):
A__ : List[Any] = asdict(self )
A__ : Optional[int] = self.structure_module.to_dict()
return output
@dataclass
class UpperCamelCase__ :
'''simple docstring'''
_lowerCAmelCase = 384
_lowerCAmelCase = 128
_lowerCAmelCase = 16
_lowerCAmelCase = 128
_lowerCAmelCase = 12
_lowerCAmelCase = 4
_lowerCAmelCase = 8
_lowerCAmelCase = 0.1
_lowerCAmelCase = 8
_lowerCAmelCase = 1
_lowerCAmelCase = 2
_lowerCAmelCase = 7
_lowerCAmelCase = 10
_lowerCAmelCase = 1e-8
_lowerCAmelCase = 1e5
def __snake_case ( self ):
return asdict(self )
def SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
"""simple docstring"""
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
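# Hypothetical usage sketch, assuming the config class above corresponds to
# transformers' EsmConfig (class names are obfuscated in this dump):
#
#   >>> config = EsmConfig(vocab_size=33, is_folding_model=True)  # builds a default EsmFoldConfig
#   >>> isinstance(config.to_dict()["esmfold_config"], dict)       # nested configs serialize recursively
#   True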
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Any=False ) -> Tuple:
"""simple docstring"""
try:
A__ : Union[str, Any] = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
A__ : Dict = default
else:
# KEY is set, convert it to True or False.
try:
A__ : Tuple = strtobool(__UpperCamelCase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F"If set, {key} must be yes or no." )
return _value
_SCREAMING_SNAKE_CASE : Optional[Any] = parse_flag_from_env('RUN_SLOW', default=False)
_SCREAMING_SNAKE_CASE : Tuple = parse_flag_from_env('RUN_REMOTE', default=False)
_SCREAMING_SNAKE_CASE : List[Any] = parse_flag_from_env('RUN_LOCAL', default=True)
_SCREAMING_SNAKE_CASE : Union[str, Any] = parse_flag_from_env('RUN_PACKAGED', default=True)
# Compression
_SCREAMING_SNAKE_CASE : Dict = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='test requires lz4')
_SCREAMING_SNAKE_CASE : str = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='test requires py7zr')
_SCREAMING_SNAKE_CASE : Optional[int] = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='test requires zstandard')
# Audio
_SCREAMING_SNAKE_CASE : Optional[Any] = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
    find_spec('soundfile') is None or version.parse(importlib_metadata.version('soundfile')) < version.parse('0.12.0'),
    reason='test requires sndfile>=0.12.0: \'pip install \"soundfile>=0.12.0\"\'; ',
)
# Beam
_SCREAMING_SNAKE_CASE : Any = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('0.3.2'),
reason='test requires apache-beam and a compatible dill version',
)
# Dill-cloudpickle compatibility
_SCREAMING_SNAKE_CASE : str = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('0.3.2'),
reason='test requires dill>0.3.2 for cloudpickle compatibility',
)
# Windows
_SCREAMING_SNAKE_CASE : Any = pytest.mark.skipif(
sys.platform == 'win32',
reason='test should not be run on Windows',
)
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Tuple ) -> Dict:
"""simple docstring"""
try:
import faiss # noqa
except ImportError:
A__ : List[str] = unittest.skip('''test requires faiss''' )(__UpperCamelCase )
return test_case
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Tuple:
"""simple docstring"""
try:
import regex # noqa
except ImportError:
A__ : Optional[int] = unittest.skip('''test requires regex''' )(__UpperCamelCase )
return test_case
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : str ) -> int:
"""simple docstring"""
try:
import elasticsearch # noqa
except ImportError:
A__ : Optional[int] = unittest.skip('''test requires elasticsearch''' )(__UpperCamelCase )
return test_case
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> int:
"""simple docstring"""
try:
import sqlalchemy # noqa
except ImportError:
A__ : Tuple = unittest.skip('''test requires sqlalchemy''' )(__UpperCamelCase )
return test_case
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Dict:
"""simple docstring"""
if not config.TORCH_AVAILABLE:
A__ : Optional[Any] = unittest.skip('''test requires PyTorch''' )(__UpperCamelCase )
return test_case
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : str ) -> Tuple:
"""simple docstring"""
if not config.TF_AVAILABLE:
A__ : str = unittest.skip('''test requires TensorFlow''' )(__UpperCamelCase )
return test_case
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] ) -> str:
"""simple docstring"""
if not config.JAX_AVAILABLE:
A__ : Optional[Any] = unittest.skip('''test requires JAX''' )(__UpperCamelCase )
return test_case
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[str] ) -> int:
"""simple docstring"""
if not config.PIL_AVAILABLE:
A__ : Dict = unittest.skip('''test requires Pillow''' )(__UpperCamelCase )
return test_case
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] ) -> Optional[Any]:
"""simple docstring"""
try:
import transformers # noqa F401
except ImportError:
return unittest.skip('''test requires transformers''' )(__UpperCamelCase )
else:
return test_case
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] ) -> Any:
"""simple docstring"""
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip('''test requires tiktoken''' )(__UpperCamelCase )
else:
return test_case
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] ) -> List[Any]:
"""simple docstring"""
try:
import spacy # noqa F401
except ImportError:
return unittest.skip('''test requires spacy''' )(__UpperCamelCase )
else:
return test_case
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Tuple ) -> Tuple:
"""simple docstring"""
def _require_spacy_model(__UpperCamelCase : List[Any] ):
try:
import spacy # noqa F401
spacy.load(__UpperCamelCase )
except ImportError:
return unittest.skip('''test requires spacy''' )(__UpperCamelCase )
except OSError:
return unittest.skip('''test requires spacy model \'{}\''''.format(__UpperCamelCase ) )(__UpperCamelCase )
else:
return test_case
return _require_spacy_model
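# Usage sketch for the decorator factory above (the model name is illustrative; the
# test is skipped unless both spacy and the named model are installed):
#
#   @require_spacy_model("en_core_web_sm")
#   def test_spacy_tokenization(self):
#       ...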
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> List[str]:
"""simple docstring"""
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip('''test requires pyspark''' )(__UpperCamelCase )
else:
return test_case
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> int:
"""simple docstring"""
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip('''test requires joblibspark''' )(__UpperCamelCase )
else:
return test_case
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] ) -> Tuple:
"""simple docstring"""
if not _run_slow_tests or _run_slow_tests == 0:
A__ : Tuple = unittest.skip('''test is slow''' )(__UpperCamelCase )
return test_case
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
if not _run_local_tests or _run_local_tests == 0:
A__ : int = unittest.skip('''test is local''' )(__UpperCamelCase )
return test_case
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[str] ) -> Optional[Any]:
"""simple docstring"""
if not _run_packaged_tests or _run_packaged_tests == 0:
A__ : Tuple = unittest.skip('''test is packaged''' )(__UpperCamelCase )
return test_case
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
if not _run_remote_tests or _run_remote_tests == 0:
A__ : Any = unittest.skip('''test requires remote''' )(__UpperCamelCase )
return test_case
def SCREAMING_SNAKE_CASE ( *__UpperCamelCase : Any ) -> Optional[int]:
"""simple docstring"""
def decorate(cls : Tuple ):
for name, fn in cls.__dict__.items():
if callable(__UpperCamelCase ) and name.startswith('''test''' ):
for decorator in decorators:
A__ : Optional[Any] = decorator(__UpperCamelCase )
setattr(cls , __UpperCamelCase , __UpperCamelCase )
return cls
return decorate
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
pass
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = 0
_lowerCAmelCase = 1
_lowerCAmelCase = 2
@contextmanager
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int=OfflineSimulationMode.CONNECTION_FAILS , __UpperCamelCase : Optional[int]=1e-16 ) -> Dict:
"""simple docstring"""
A__ : Optional[int] = requests.Session().request
def timeout_request(__UpperCamelCase : List[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : List[str] , **__UpperCamelCase : Dict ):
# Change the url to an invalid url so that the connection hangs
A__ : List[str] = '''https://10.255.255.1'''
if kwargs.get('''timeout''' ) is None:
raise RequestWouldHangIndefinitelyError(
F"Tried a call to {url} in offline mode with no timeout set. Please set a timeout." )
A__ : Optional[Any] = timeout
try:
return online_request(__UpperCamelCase , __UpperCamelCase , **__UpperCamelCase )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
A__ : List[str] = url
A__ : Optional[Any] = e.args[0]
A__ : Union[str, Any] = (max_retry_error.args[0].replace('''10.255.255.1''' , F"OfflineMock[{url}]" ),)
A__ : List[str] = (max_retry_error,)
raise
def raise_connection_error(__UpperCamelCase : Tuple , __UpperCamelCase : List[Any] , **__UpperCamelCase : Any ):
raise requests.ConnectionError('''Offline mode is enabled.''' , request=__UpperCamelCase )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch('''requests.Session.send''' , __UpperCamelCase ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch('''requests.Session.request''' , __UpperCamelCase ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch('''datasets.config.HF_DATASETS_OFFLINE''' , __UpperCamelCase ):
yield
else:
raise ValueError('''Please use a value from the OfflineSimulationMode enum.''' )
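# Usage sketch (assuming the context manager above is exposed as `offline`, as in
# datasets' testing utilities): each mode patches a different layer of `requests`.
#
#   with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT, timeout=1e-16):
#       ...  # any requests.Session.request call now fails almost immediately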
@contextmanager
def SCREAMING_SNAKE_CASE ( *__UpperCamelCase : str , **__UpperCamelCase : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
A__ : Optional[int] = str(Path().resolve() )
with tempfile.TemporaryDirectory(*__UpperCamelCase , **__UpperCamelCase ) as tmp_dir:
try:
os.chdir(__UpperCamelCase )
yield
finally:
os.chdir(__UpperCamelCase )
@contextmanager
def SCREAMING_SNAKE_CASE ( ) -> int:
"""simple docstring"""
import gc
gc.collect()
A__ : Optional[Any] = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
"""simple docstring"""
import gc
gc.collect()
A__ : List[str] = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] , __UpperCamelCase : int ) -> Optional[int]:
"""simple docstring"""
return deepcopy(__UpperCamelCase ).integers(0 , 1_00 , 10 ).tolist() == deepcopy(__UpperCamelCase ).integers(0 , 1_00 , 10 ).tolist()
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Tuple ) -> Optional[Any]:
"""simple docstring"""
import decorator
from requests.exceptions import HTTPError
def _wrapper(__UpperCamelCase : Tuple , *__UpperCamelCase : Tuple , **__UpperCamelCase : Optional[Any] ):
try:
return func(*__UpperCamelCase , **__UpperCamelCase )
except HTTPError as err:
if str(__UpperCamelCase ).startswith('''500''' ) or str(__UpperCamelCase ).startswith('''502''' ):
pytest.xfail(str(__UpperCamelCase ) )
raise err
return decorator.decorator(_wrapper , __UpperCamelCase )
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : Any = returncode
A__ : List[str] = stdout
A__ : Tuple = stderr
async def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] ) -> str:
"""simple docstring"""
while True:
A__ : int = await stream.readline()
if line:
callback(__UpperCamelCase )
else:
break
async def SCREAMING_SNAKE_CASE ( __UpperCamelCase : str , __UpperCamelCase : Union[str, Any]=None , __UpperCamelCase : Tuple=None , __UpperCamelCase : List[str]=None , __UpperCamelCase : Tuple=False , __UpperCamelCase : Optional[Any]=False ) -> _RunOutput:
"""simple docstring"""
if echo:
print('''\nRunning: ''' , ''' '''.join(__UpperCamelCase ) )
A__ : Dict = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=__UpperCamelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=__UpperCamelCase , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
A__ : Any = []
A__ : Dict = []
def tee(__UpperCamelCase : int , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any] , __UpperCamelCase : Union[str, Any]="" ):
A__ : Union[str, Any] = line.decode('''utf-8''' ).rstrip()
sink.append(__UpperCamelCase )
if not quiet:
print(__UpperCamelCase , __UpperCamelCase , file=__UpperCamelCase )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout , lambda __UpperCamelCase : tee(__UpperCamelCase , __UpperCamelCase , sys.stdout , label='''stdout:''' ) ),
_read_stream(p.stderr , lambda __UpperCamelCase : tee(__UpperCamelCase , __UpperCamelCase , sys.stderr , label='''stderr:''' ) ),
] , timeout=__UpperCamelCase , )
return _RunOutput(await p.wait() , __UpperCamelCase , __UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any , __UpperCamelCase : Optional[int]=None , __UpperCamelCase : int=None , __UpperCamelCase : Tuple=1_80 , __UpperCamelCase : str=False , __UpperCamelCase : int=True ) -> _RunOutput:
"""simple docstring"""
A__ : str = asyncio.get_event_loop()
A__ : List[Any] = loop.run_until_complete(
_stream_subprocess(__UpperCamelCase , env=__UpperCamelCase , stdin=__UpperCamelCase , timeout=__UpperCamelCase , quiet=__UpperCamelCase , echo=__UpperCamelCase ) )
A__ : Union[str, Any] = ''' '''.join(__UpperCamelCase )
if result.returncode > 0:
A__ : List[Any] = '''\n'''.join(result.stderr )
raise RuntimeError(
F"'{cmd_str}' failed with returncode {result.returncode}\n\n"
F"The combined stderr from workers follows:\n{stderr}" )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(F"'{cmd_str}' produced no output." )
return result
def SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
"""simple docstring"""
A__ : Any = os.environ.get('''PYTEST_XDIST_WORKER''' , '''gw0''' )
A__ : int = re.sub(R'''^gw''' , '''''' , __UpperCamelCase , 0 , re.M )
return int(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( ) -> List[Any]:
"""simple docstring"""
A__ : List[Any] = 2_95_00
A__ : str = pytest_xdist_worker_id()
return port + uniq_delta
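# Example: under pytest-xdist, worker "gw3" is parsed to id 3, giving port
# 29500 + 3 = 29503; outside xdist the "gw0" fallback yields the base port 29500.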
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=1024 , UpperCamelCase__=1024 , UpperCamelCase__=3.6 ):
A__ : str = tokenizer
A__ : int = tokenizer.bos_token_id
A__ : List[Any] = dataset
A__ : Tuple = seq_length
A__ : Any = seq_length * chars_per_token * num_of_sequences
def __iter__( self ):
A__ : Dict = iter(self.dataset )
A__ : Tuple = True
while more_examples:
A__ , A__ : Optional[Any] = [], 0
while True:
if buffer_len >= self.input_characters:
break
try:
buffer.append(next(UpperCamelCase__ )['''content'''] )
buffer_len += len(buffer[-1] )
except StopIteration:
A__ : Dict = False
break
A__ : str = tokenizer(UpperCamelCase__ , truncation=UpperCamelCase__ )['''input_ids''']
A__ : Optional[int] = []
for tokenized_input in tokenized_inputs:
all_token_ids.extend(tokenized_input + [self.concat_token_id] )
for i in range(0 , len(UpperCamelCase__ ) , self.seq_length ):
A__ : Optional[int] = all_token_ids[i : i + self.seq_length]
if len(UpperCamelCase__ ) == self.seq_length:
yield torch.tensor(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] ) -> Any:
"""simple docstring"""
A__ : Any = {'''streaming''': True}
A__ : List[str] = load_dataset(args.dataset_name , split='''train''' , **__UpperCamelCase )
A__ : List[str] = ConstantLengthDataset(__UpperCamelCase , __UpperCamelCase , seq_length=args.seq_length )
A__ : int = DataLoader(__UpperCamelCase , batch_size=args.batch_size )
return eval_dataloader
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[str] ) -> Dict:
"""simple docstring"""
model.eval()
A__ : Dict = []
for step, batch in enumerate(__UpperCamelCase ):
with torch.no_grad():
A__ : Any = model(__UpperCamelCase , labels=__UpperCamelCase )
A__ : Tuple = outputs.loss.repeat(args.batch_size )
losses.append(accelerator.gather(__UpperCamelCase ) )
if args.max_eval_steps > 0 and step >= args.max_eval_steps:
break
A__ : Tuple = torch.mean(torch.cat(__UpperCamelCase ) )
try:
A__ : Optional[Any] = torch.exp(__UpperCamelCase )
except OverflowError:
A__ : Union[str, Any] = float('''inf''' )
return loss.item(), perplexity.item()
# Setup Accelerator
_SCREAMING_SNAKE_CASE : List[Any] = Accelerator()
# Parse configuration
_SCREAMING_SNAKE_CASE : Optional[int] = HfArgumentParser(EvaluationArguments)
_SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args()
set_seed(args.seed)
# Logging
_SCREAMING_SNAKE_CASE : Dict = logging.getLogger(__name__)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
# Load model and tokenizer
_SCREAMING_SNAKE_CASE : Optional[int] = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
_SCREAMING_SNAKE_CASE : List[str] = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
_SCREAMING_SNAKE_CASE : Optional[Any] = create_dataloader(args)
# Prepare everything with our `accelerator`.
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[Any] = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info('Evaluating and saving model after training')
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[int] = evaluate(args)
logger.info(f"""loss/eval: {eval_loss}, perplexity: {perplexity}""")
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : str , __UpperCamelCase : str ) -> Any:
"""simple docstring"""
assert x is not None
assert y is not None
A__ : int = len(__UpperCamelCase )
A__ : Union[str, Any] = len(__UpperCamelCase )
# declaring the array for storing the dp values
A__ : Any = [[0] * (n + 1) for _ in range(m + 1 )] # noqa: E741
for i in range(1 , m + 1 ):
for j in range(1 , n + 1 ):
A__ : Any = 1 if x[i - 1] == y[j - 1] else 0
A__ : Tuple = max(l[i - 1][j] , l[i][j - 1] , l[i - 1][j - 1] + match )
A__ : Optional[int] = ''''''
A__ , A__ : List[str] = m, n
while i > 0 and j > 0:
A__ : List[str] = 1 if x[i - 1] == y[j - 1] else 0
if l[i][j] == l[i - 1][j - 1] + match:
if match == 1:
A__ : Optional[int] = x[i - 1] + seq
i -= 1
j -= 1
elif l[i][j] == l[i - 1][j]:
i -= 1
else:
j -= 1
return l[m][n], seq
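# DP recurrence used above: l[i][j] = max(l[i-1][j], l[i][j-1], l[i-1][j-1] + match),
# where match is 1 iff x[i-1] == y[j-1]; the backtracking loop then rebuilds one
# optimal subsequence. For "AGGTAB" vs "GXTXAYB" this yields length 4 and "GTAB",
# as the demo below checks.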
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : str = 'AGGTAB'
_SCREAMING_SNAKE_CASE : Optional[int] = 'GXTXAYB'
_SCREAMING_SNAKE_CASE : Optional[Any] = 4
_SCREAMING_SNAKE_CASE : int = 'GTAB'
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = longest_common_subsequence(a, b)
print('len =', ln, ', sub-sequence =', subseq)
import doctest
doctest.testmod()
def SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
"""simple docstring"""
A__ : Optional[Any] = 0
for i in range(1 , 10_01 ):
total += i**i
return str(__UpperCamelCase )[-10:]
if __name__ == "__main__":
print(solution())
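# Equivalent sketch using modular exponentiation, which keeps intermediates small
# (purely illustrative; the brute-force sum above is already fast in Python):
#
#   MOD = 10**10
#   last_ten = sum(pow(i, i, MOD) for i in range(1, 1001)) % MOD
#   print(str(last_ten).zfill(10))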
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int = 10 , __UpperCamelCase : int = 10_00 , __UpperCamelCase : bool = True ) -> int:
"""simple docstring"""
assert (
isinstance(__UpperCamelCase , __UpperCamelCase )
and isinstance(__UpperCamelCase , __UpperCamelCase )
and isinstance(__UpperCamelCase , __UpperCamelCase )
), "Invalid type of value(s) specified to function!"
if min_val > max_val:
        raise ValueError('''Invalid value for min_val or max_val (min_val must be < max_val)''' )
return min_val if option else max_val
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int , __UpperCamelCase : int ) -> int:
"""simple docstring"""
    return int((number_a + number_b) / 2 )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : int ) -> None:
"""simple docstring"""
assert (
isinstance(__UpperCamelCase , __UpperCamelCase ) and isinstance(__UpperCamelCase , __UpperCamelCase ) and isinstance(__UpperCamelCase , __UpperCamelCase )
), 'argument values must be type of "int"'
    if lower > higher:
        raise ValueError('''argument values for lower and higher must satisfy lower < higher''' )
    if not lower < to_guess < higher:
        raise ValueError(
            '''the value to guess must lie strictly between lower and higher''' )
def answer(__UpperCamelCase : int ) -> str:
if number > to_guess:
return "high"
elif number < to_guess:
return "low"
else:
return "same"
print('''started...''' )
A__ : List[str] = lower
A__ : List[Any] = higher
A__ : List[Any] = []
while True:
A__ : Tuple = get_avg(__UpperCamelCase , __UpperCamelCase )
last_numbers.append(__UpperCamelCase )
if answer(__UpperCamelCase ) == "low":
A__ : int = number
elif answer(__UpperCamelCase ) == "high":
A__ : int = number
else:
break
print(F"guess the number : {last_numbers[-1]}" )
print(F"details : {last_numbers!s}" )
def SCREAMING_SNAKE_CASE ( ) -> None:
"""simple docstring"""
A__ : Optional[int] = int(input('''Enter lower value : ''' ).strip() )
A__ : Tuple = int(input('''Enter high value : ''' ).strip() )
A__ : Optional[Any] = int(input('''Enter value to guess : ''' ).strip() )
guess_the_number(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
if __name__ == "__main__":
main()
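# The guessing loop above is a binary search on the midpoint, so it converges in
# O(log2(higher - lower)) steps; e.g. for lower=0, higher=1000 it needs at most
# about 10 guesses.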
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self ):
A__ : Dict = inspect.getfile(accelerate.test_utils )
A__ : Any = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps''', '''test_metrics.py'''] )
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
A__ : Tuple = test_metrics
@require_cpu
def __snake_case ( self ):
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
def __snake_case ( self ):
debug_launcher(self.test_metrics.main )
@require_single_gpu
def __snake_case ( self ):
self.test_metrics.main()
@require_multi_gpu
def __snake_case ( self ):
print(F"Found {torch.cuda.device_count()} devices." )
A__ : int = ['''torchrun''', F"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(UpperCamelCase__ , env=os.environ.copy() )
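# The multi-GPU test above shells out through torchrun with one process per visible
# CUDA device; OMP_NUM_THREADS is pinned to 1 to avoid CPU oversubscription when
# several ranks share a host.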
from sklearn.metrics import fa_score
import datasets
_SCREAMING_SNAKE_CASE : List[str] = '\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n'
_SCREAMING_SNAKE_CASE : Any = '\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n\n - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {\'f1\': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results[\'f1\'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results[\'f1\'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")\n >>> print(round(results[\'f1\'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {\'f1\': array([0.8, 0. , 0. ])}\n'
_SCREAMING_SNAKE_CASE : Union[str, Any] = '\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class UpperCamelCase__ ( datasets.Metric ):
'''simple docstring'''
def __snake_case ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''] , )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=1 , UpperCamelCase__="binary" , UpperCamelCase__=None ):
A__ : Optional[Any] = fa_score(
UpperCamelCase__ , UpperCamelCase__ , labels=UpperCamelCase__ , pos_label=UpperCamelCase__ , average=UpperCamelCase__ , sample_weight=UpperCamelCase__ )
return {"f1": float(UpperCamelCase__ ) if score.size == 1 else score}
from numpy import exp, pi, sqrt
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : float , __UpperCamelCase : float = 0.0 , __UpperCamelCase : float = 1.0 ) -> float:
"""simple docstring"""
return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
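# Sanity check (the standard normal density at its mean is 1 / sqrt(2 * pi)),
# assuming the density above is callable as `gaussian`:
#
#   >>> round(float(gaussian(0)), 4)
#   0.3989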
if __name__ == "__main__":
import doctest
doctest.testmod()
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__=13 , UpperCamelCase__=30 , UpperCamelCase__=2 , UpperCamelCase__=3 , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=32 , UpperCamelCase__=5 , UpperCamelCase__=4 , UpperCamelCase__=37 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=10 , UpperCamelCase__=0.0_2 , UpperCamelCase__=None , UpperCamelCase__=2 , ):
A__ : int = parent
A__ : Union[str, Any] = batch_size
A__ : Dict = image_size
A__ : int = patch_size
A__ : Any = num_channels
A__ : List[Any] = is_training
A__ : Optional[Any] = use_labels
A__ : str = hidden_size
A__ : int = num_hidden_layers
A__ : Tuple = num_attention_heads
A__ : Tuple = intermediate_size
A__ : str = hidden_act
A__ : Optional[int] = hidden_dropout_prob
A__ : Tuple = attention_probs_dropout_prob
A__ : str = type_sequence_label_size
A__ : Optional[int] = initializer_range
A__ : Optional[int] = scope
A__ : str = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
A__ : List[Any] = (image_size // patch_size) ** 2
A__ : Optional[int] = num_patches + 1
def __snake_case ( self ):
A__ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A__ : str = None
if self.use_labels:
A__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ : Tuple = self.get_config()
return config, pixel_values, labels
def __snake_case ( self ):
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : Optional[Any] = ViTModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A__ : int = model(UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : List[Any] = ViTForMaskedImageModeling(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A__ : Any = model(UpperCamelCase__ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
A__ : Optional[Any] = 1
A__ : str = ViTForMaskedImageModeling(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A__ : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A__ : Dict = model(UpperCamelCase__ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : str = self.type_sequence_label_size
A__ : str = ViTForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A__ : Any = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
A__ : Union[str, Any] = 1
A__ : int = ViTForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A__ : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A__ : List[Any] = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __snake_case ( self ):
A__ : List[Any] = self.prepare_config_and_inputs()
(
(
A__
) , (
A__
) , (
A__
) ,
) : Union[str, Any] = config_and_inputs
A__ : Optional[int] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
_lowerCAmelCase = (
{"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
if is_torch_available()
else {}
)
_lowerCAmelCase = True
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
def __snake_case ( self ):
A__ : Union[str, Any] = ViTModelTester(self )
A__ : Optional[Any] = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 )
def __snake_case ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def __snake_case ( self ):
pass
def __snake_case ( self ):
A__ , A__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : str = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A__ : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) )
def __snake_case ( self ):
A__ , A__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : Optional[int] = model_class(UpperCamelCase__ )
A__ : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ : Any = [*signature.parameters.keys()]
A__ : Optional[int] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def __snake_case ( self ):
A__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def __snake_case ( self ):
A__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCamelCase__ )
def __snake_case ( self ):
A__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
@slow
def __snake_case ( self ):
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : Tuple = ViTModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
"""simple docstring"""
A__ : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __snake_case ( self ):
return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''' ) if is_vision_available() else None
@slow
def __snake_case ( self ):
A__ : Any = ViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''' ).to(UpperCamelCase__ )
A__ : List[str] = self.default_image_processor
A__ : Dict = prepare_img()
A__ : Dict = image_processor(images=UpperCamelCase__ , return_tensors='''pt''' ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
A__ : Optional[int] = model(**UpperCamelCase__ )
# verify the logits
A__ : Any = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
A__ : Optional[int] = torch.tensor([-0.2_7_4_4, 0.8_2_1_5, -0.0_8_3_6] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) )
@slow
def __snake_case ( self ):
# ViT models have an `interpolate_pos_encoding` argument in their forward method,
# allowing to interpolate the pre-trained position embeddings in order to use
# the model on higher resolutions. The DINO model by Facebook AI leverages this
# to visualize self-attention on higher resolution images.
A__ : Optional[Any] = ViTModel.from_pretrained('''facebook/dino-vits8''' ).to(UpperCamelCase__ )
A__ : List[str] = ViTImageProcessor.from_pretrained('''facebook/dino-vits8''' , size=480 )
A__ : Optional[Any] = prepare_img()
A__ : int = image_processor(images=UpperCamelCase__ , return_tensors='''pt''' )
A__ : str = inputs.pixel_values.to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
A__ : int = model(UpperCamelCase__ , interpolate_pos_encoding=UpperCamelCase__ )
# verify the logits
A__ : Dict = torch.Size((1, 3601, 384) )
self.assertEqual(outputs.last_hidden_state.shape , UpperCamelCase__ )
A__ : int = torch.tensor(
[[4.2_3_4_0, 4.3_9_0_6, -6.6_6_9_2], [4.5_4_6_3, 1.8_9_2_8, -6.7_2_5_7], [4.4_4_2_9, 0.8_4_9_6, -5.8_5_8_5]] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCamelCase__ , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def __snake_case ( self ):
A__ : Any = ViTModel.from_pretrained('''facebook/dino-vits8''' , torch_dtype=torch.floataa , device_map='''auto''' )
A__ : Any = self.default_image_processor
A__ : Optional[Any] = prepare_img()
A__ : Tuple = image_processor(images=UpperCamelCase__ , return_tensors='''pt''' )
A__ : Dict = inputs.pixel_values.to(UpperCamelCase__ )
# forward pass to make sure inference works in fp16
with torch.no_grad():
A__ : List[Any] = model(UpperCamelCase__ )
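# Note: the fp16 test above only asserts that a forward pass runs under
# torch.float16 with device_map="auto"; no output values are checked.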
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE : int = {
'configuration_bert': ['BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BertConfig', 'BertOnnxConfig'],
'tokenization_bert': ['BasicTokenizer', 'BertTokenizer', 'WordpieceTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Optional[Any] = ['BertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Union[str, Any] = [
'BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BertForMaskedLM',
'BertForMultipleChoice',
'BertForNextSentencePrediction',
'BertForPreTraining',
'BertForQuestionAnswering',
'BertForSequenceClassification',
'BertForTokenClassification',
'BertLayer',
'BertLMHeadModel',
'BertModel',
'BertPreTrainedModel',
'load_tf_weights_in_bert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Tuple = [
'TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBertEmbeddings',
'TFBertForMaskedLM',
'TFBertForMultipleChoice',
'TFBertForNextSentencePrediction',
'TFBertForPreTraining',
'TFBertForQuestionAnswering',
'TFBertForSequenceClassification',
'TFBertForTokenClassification',
'TFBertLMHeadModel',
'TFBertMainLayer',
'TFBertModel',
'TFBertPreTrainedModel',
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Dict = ['TFBertTokenizer']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Optional[int] = [
'FlaxBertForCausalLM',
'FlaxBertForMaskedLM',
'FlaxBertForMultipleChoice',
'FlaxBertForNextSentencePrediction',
'FlaxBertForPreTraining',
'FlaxBertForQuestionAnswering',
'FlaxBertForSequenceClassification',
'FlaxBertForTokenClassification',
'FlaxBertModel',
'FlaxBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
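# With the _LazyModule pattern above, importing the package does not pull in any
# backend; a backend-gated symbol such as BertModel is only imported when it is
# first resolved as an attribute of the module.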
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
_SCREAMING_SNAKE_CASE : Optional[Any] = pytest.mark.integration
@pytest.mark.parametrize('''path''' , ['''paws''', '''csv'''] )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[int] , __UpperCamelCase : str ) -> Tuple:
"""simple docstring"""
inspect_dataset(__UpperCamelCase , __UpperCamelCase )
A__ : Any = path + '''.py'''
assert script_name in os.listdir(__UpperCamelCase )
assert "__pycache__" not in os.listdir(__UpperCamelCase )
@pytest.mark.filterwarnings('''ignore:inspect_metric is deprecated:FutureWarning''' )
@pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
@pytest.mark.parametrize('''path''' , ['''accuracy'''] )
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
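

# A minimal direct-usage sketch of the inspection helpers exercised above
# (network access to the Hugging Face Hub assumed; not part of the original tests):
def _inspection_usage_sketch():
    configs = get_dataset_config_names("squad")                        # ["plain_text"]
    splits = get_dataset_split_names("squad", config_name=configs[0])  # ["train", "validation"]
    return configs, splits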
| 55
|
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
_SCREAMING_SNAKE_CASE : List[Any] = get_tests_dir('fixtures/dummy_feature_extractor_config.json')
_SCREAMING_SNAKE_CASE : int = get_tests_dir('fixtures/vocab.json')
_SCREAMING_SNAKE_CASE : Tuple = get_tests_dir('fixtures')
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
def __snake_case ( self ):
A__ : List[Any] = 0
def __snake_case ( self ):
A__ : Dict = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Optional[Any] = WavaVecaConfig()
A__ : Dict = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
# save in new folder
model_config.save_pretrained(UpperCamelCase__ )
processor.save_pretrained(UpperCamelCase__ )
A__ : Any = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(UpperCamelCase__ , os.path.join(UpperCamelCase__ , UpperCamelCase__ ) )
copyfile(UpperCamelCase__ , os.path.join(UpperCamelCase__ , '''vocab.json''' ) )
A__ : List[Any] = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Dict = WavaVecaFeatureExtractor()
A__ : Union[str, Any] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
A__ : Optional[int] = WavaVecaProcessor(UpperCamelCase__ , UpperCamelCase__ )
# save in new folder
processor.save_pretrained(UpperCamelCase__ )
# drop `processor_class` in tokenizer
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , '''r''' ) as f:
A__ : str = json.load(UpperCamelCase__ )
config_dict.pop('''processor_class''' )
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , '''w''' ) as f:
f.write(json.dumps(UpperCamelCase__ ) )
A__ : Optional[int] = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Optional[int] = WavaVecaFeatureExtractor()
A__ : List[Any] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
A__ : str = WavaVecaProcessor(UpperCamelCase__ , UpperCamelCase__ )
# save in new folder
processor.save_pretrained(UpperCamelCase__ )
# drop `processor_class` in feature extractor
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , '''r''' ) as f:
A__ : List[Any] = json.load(UpperCamelCase__ )
config_dict.pop('''processor_class''' )
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , '''w''' ) as f:
f.write(json.dumps(UpperCamelCase__ ) )
A__ : List[Any] = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Any = WavaVecaConfig(processor_class='''Wav2Vec2Processor''' )
model_config.save_pretrained(UpperCamelCase__ )
# copy relevant files
copyfile(UpperCamelCase__ , os.path.join(UpperCamelCase__ , '''vocab.json''' ) )
            # create empty sample processor
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , '''w''' ) as f:
f.write('''{}''' )
A__ : Union[str, Any] = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(UpperCamelCase__ ):
A__ : Union[str, Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCamelCase__ ):
A__ : str = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__ )
A__ : int = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__ )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
A__ : List[Any] = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
A__ : List[Any] = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
A__ : Dict = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__ , use_fast=UpperCamelCase__ )
A__ : int = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def __snake_case ( self ):
try:
AutoConfig.register('''custom''' , UpperCamelCase__ )
AutoFeatureExtractor.register(UpperCamelCase__ , UpperCamelCase__ )
AutoTokenizer.register(UpperCamelCase__ , slow_tokenizer_class=UpperCamelCase__ )
AutoProcessor.register(UpperCamelCase__ , UpperCamelCase__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCamelCase__ ):
AutoProcessor.register(UpperCamelCase__ , UpperCamelCase__ )
# Now that the config is registered, it can be used as any other config with the auto-API
A__ : Any = CustomFeatureExtractor.from_pretrained(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
A__ : str = os.path.join(UpperCamelCase__ , '''vocab.txt''' )
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
A__ : str = CustomTokenizer(UpperCamelCase__ )
A__ : Optional[Any] = CustomProcessor(UpperCamelCase__ , UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(UpperCamelCase__ )
A__ : Union[str, Any] = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def __snake_case ( self ):
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = False
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = False
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = "AutoFeatureExtractor"
_lowerCAmelCase = "AutoTokenizer"
_lowerCAmelCase = False
try:
AutoConfig.register('''custom''' , UpperCamelCase__ )
AutoFeatureExtractor.register(UpperCamelCase__ , UpperCamelCase__ )
AutoTokenizer.register(UpperCamelCase__ , slow_tokenizer_class=UpperCamelCase__ )
AutoProcessor.register(UpperCamelCase__ , UpperCamelCase__ )
# If remote code is not set, the default is to use local classes.
A__ : List[Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
A__ : Any = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__ )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
A__ : Union[str, Any] = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__ )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def __snake_case ( self ):
A__ : str = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(processor.__class__.__name__ , '''BertTokenizerFast''' )
def __snake_case ( self ):
A__ : Union[str, Any] = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-convnext''' )
self.assertEqual(processor.__class__.__name__ , '''ConvNextImageProcessor''' )
@is_staging_test
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def __snake_case ( cls ):
A__ : List[str] = TOKEN
HfFolder.save_token(UpperCamelCase__ )
@classmethod
def __snake_case ( cls ):
try:
delete_repo(token=cls._token , repo_id='''test-processor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-processor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-processor''' )
except HTTPError:
pass
def __snake_case ( self ):
A__ : Optional[Any] = WavaVecaProcessor.from_pretrained(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(UpperCamelCase__ , '''test-processor''' ) , push_to_hub=UpperCamelCase__ , use_auth_token=self._token )
A__ : List[Any] = WavaVecaProcessor.from_pretrained(F"{USER}/test-processor" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(UpperCamelCase__ , getattr(new_processor.feature_extractor , UpperCamelCase__ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def __snake_case ( self ):
A__ : int = WavaVecaProcessor.from_pretrained(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(UpperCamelCase__ , '''test-processor-org''' ) , push_to_hub=UpperCamelCase__ , use_auth_token=self._token , organization='''valid_org''' , )
A__ : List[str] = WavaVecaProcessor.from_pretrained('''valid_org/test-processor-org''' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(UpperCamelCase__ , getattr(new_processor.feature_extractor , UpperCamelCase__ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def __snake_case ( self ):
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
A__ : Optional[Any] = CustomFeatureExtractor.from_pretrained(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
A__ : List[Any] = os.path.join(UpperCamelCase__ , '''vocab.txt''' )
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
A__ : Union[str, Any] = CustomTokenizer(UpperCamelCase__ )
A__ : List[Any] = CustomProcessor(UpperCamelCase__ , UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(F"{USER}/test-dynamic-processor" , token=self._token )
A__ : Union[str, Any] = Repository(UpperCamelCase__ , clone_from=F"{USER}/test-dynamic-processor" , token=self._token )
processor.save_pretrained(UpperCamelCase__ )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor''',
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(UpperCamelCase__ , '''tokenizer_config.json''' ) ) as f:
A__ : Optional[int] = json.load(UpperCamelCase__ )
self.assertDictEqual(
tokenizer_config['''auto_map'''] , {
'''AutoTokenizer''': ['''custom_tokenization.CustomTokenizer''', None],
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(UpperCamelCase__ , '''custom_feature_extraction.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(UpperCamelCase__ , '''custom_tokenization.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(UpperCamelCase__ , '''custom_processing.py''' ) ) )
repo.push_to_hub()
A__ : Tuple = AutoProcessor.from_pretrained(F"{USER}/test-dynamic-processor" , trust_remote_code=UpperCamelCase__ )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , '''CustomProcessor''' )
| 55
| 1
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Union[str, Any] = '▁'
_SCREAMING_SNAKE_CASE : Optional[int] = {'vocab_file': 'spiece.model'}
_SCREAMING_SNAKE_CASE : int = {
'vocab_file': {
'google/reformer-crime-and-punishment': (
'https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model'
)
}
}
_SCREAMING_SNAKE_CASE : List[str] = {
'google/reformer-crime-and-punishment': 5_2_4_2_8_8,
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = VOCAB_FILES_NAMES
_lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase = ["input_ids", "attention_mask"]
def __init__( self , UpperCamelCase__ , UpperCamelCase__="</s>" , UpperCamelCase__="<unk>" , UpperCamelCase__=[] , UpperCamelCase__ = None , **UpperCamelCase__ , ):
A__ : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase__ , )
A__ : Tuple = vocab_file
A__ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCamelCase__ )
@property
def __snake_case ( self ):
return self.sp_model.get_piece_size()
def __snake_case ( self ):
A__ : Dict = {self.convert_ids_to_tokens(UpperCamelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
A__ : Tuple = self.__dict__.copy()
A__ : List[str] = None
return state
def __setstate__( self , UpperCamelCase__ ):
A__ : Optional[Any] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
A__ : int = {}
A__ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __snake_case ( self , UpperCamelCase__ ):
return self.sp_model.encode(UpperCamelCase__ , out_type=UpperCamelCase__ )
def __snake_case ( self , UpperCamelCase__ ):
return self.sp_model.piece_to_id(UpperCamelCase__ )
    def __snake_case ( self , UpperCamelCase__ ):
        # convert an id back to its token; fall back to the unknown token for out-of-range ids
        if UpperCamelCase__ < self.sp_model.get_piece_size():
            return self.sp_model.IdToPiece(UpperCamelCase__ )
        return self.unk_token
def __snake_case ( self , UpperCamelCase__ ):
A__ : int = []
A__ : str = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(UpperCamelCase__ ) + token
A__ : Any = []
else:
current_sub_tokens.append(UpperCamelCase__ )
out_string += self.sp_model.decode(UpperCamelCase__ )
return out_string.strip()
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ = None ):
if not os.path.isdir(UpperCamelCase__ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
A__ : str = os.path.join(
UpperCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase__ , '''wb''' ) as fi:
A__ : str = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase__ )
return (out_vocab_file,)
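

# A minimal round-trip sketch of the SentencePiece calls the tokenizer above relies on
# (hypothetical local model path; plain `sentencepiece` API, not the tokenizer class itself):
def _sentencepiece_roundtrip_sketch(model_path="spiece.model", text="hello world"):
    sp = spm.SentencePieceProcessor()
    sp.Load(model_path)                                # what __init__/__setstate__ do above
    pieces = sp.encode(text, out_type=str)             # the tokenize step
    ids = [sp.piece_to_id(piece) for piece in pieces]  # token -> id conversion
    return pieces, ids, sp.decode(pieces)              # decoding joins pieces back into text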
| 55
|
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
@staticmethod
@abstractmethod
def __snake_case ( UpperCamelCase__ ):
raise NotImplementedError()
@abstractmethod
def __snake_case ( self ):
raise NotImplementedError()
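

# The two abstract hooks above form transformers' CLI command interface
# (`BaseTransformersCLICommand` in the original source): a static `register_subcommand`
# wires the command into an argparse sub-parser, and `run` executes it. A self-contained
# sketch of the same pattern with conventional names (hypothetical EchoCommand, not part
# of the original file):
class _BaseCommandSketch(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(subparsers):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()


class EchoCommand(_BaseCommandSketch):
    @staticmethod
    def register_subcommand(subparsers):
        parser = subparsers.add_parser("echo")
        parser.add_argument("text")
        # stash a factory so the dispatcher can build the command from parsed args
        parser.set_defaults(factory=lambda args: EchoCommand(args.text))

    def __init__(self, text):
        self.text = text

    def run(self):
        print(self.text)


def _cli_sketch(argv=None):
    parser = ArgumentParser("sketch-cli")
    EchoCommand.register_subcommand(parser.add_subparsers())
    args = parser.parse_args(argv)
    args.factory(args).run()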
| 55
| 1
|
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)


def viz_polynomial() -> None:
    """Visualize the polynomial regression fit against the raw data."""
    plt.scatter(X, y, color="red")
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="blue")
    plt.title("Truth or Bluff (Linear Regression)")
    plt.xlabel("Position level")
    plt.ylabel("Salary")
    plt.show()


if __name__ == "__main__":
    viz_polynomial()

    # Predicting a new result with Polynomial Regression
    print(pol_reg.predict(poly_reg.fit_transform([[5.5]])))
# output should be 132148.43750003
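    # Note: PolynomialFeatures(degree=4) expands a single feature x into
    # [1, x, x**2, x**3, x**4], so pol_reg is an ordinary linear model over those
    # five columns; e.g. poly_reg.fit_transform([[2]]) -> [[1., 2., 4., 8., 16.]]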
| 55
|
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__=13 , UpperCamelCase__=[30, 30] , UpperCamelCase__=2 , UpperCamelCase__=3 , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=32 , UpperCamelCase__=5 , UpperCamelCase__=4 , UpperCamelCase__=37 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=10 , UpperCamelCase__=0.0_2 , UpperCamelCase__=3 , UpperCamelCase__=None , UpperCamelCase__=8 , UpperCamelCase__=10 , ):
A__ : Optional[int] = parent
A__ : List[Any] = batch_size
A__ : Dict = image_size
A__ : Any = patch_size
A__ : Dict = num_channels
A__ : List[Any] = is_training
A__ : int = use_labels
A__ : Any = hidden_size
A__ : List[str] = num_hidden_layers
A__ : Optional[int] = num_attention_heads
A__ : Optional[Any] = intermediate_size
A__ : str = hidden_act
A__ : str = hidden_dropout_prob
A__ : Optional[int] = attention_probs_dropout_prob
A__ : Optional[int] = type_sequence_label_size
A__ : Any = initializer_range
A__ : Optional[int] = num_labels
A__ : Union[str, Any] = scope
A__ : Union[str, Any] = n_targets
A__ : Dict = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
A__ : int = (image_size[1] // patch_size) * (image_size[0] // patch_size)
A__ : List[str] = num_patches + 1 + self.num_detection_tokens
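        # e.g. with the defaults above (image_size=[30, 30], patch_size=2, num_detection_tokens=10):
        # (30 // 2) * (30 // 2) = 225 patches, so expected_seq_len = 225 + 1 + 10 = 236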
def __snake_case ( self ):
A__ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
A__ : int = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
A__ : Tuple = []
for i in range(self.batch_size ):
A__ : List[Any] = {}
A__ : Tuple = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=UpperCamelCase__ )
A__ : Any = torch.rand(self.n_targets , 4 , device=UpperCamelCase__ )
labels.append(UpperCamelCase__ )
A__ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __snake_case ( self ):
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : Tuple = YolosModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A__ : Optional[Any] = model(UpperCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : Any = YolosForObjectDetection(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A__ : Union[str, Any] = model(pixel_values=UpperCamelCase__ )
A__ : Optional[int] = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
A__ : Union[str, Any] = model(pixel_values=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def __snake_case ( self ):
A__ : Optional[int] = self.prepare_config_and_inputs()
A__ , A__ , A__ : Optional[Any] = config_and_inputs
A__ : Optional[int] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
_lowerCAmelCase = (
{"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
)
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=False ):
A__ : Optional[int] = super()._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
A__ : str = []
for i in range(self.model_tester.batch_size ):
A__ : int = {}
A__ : Dict = torch.ones(
size=(self.model_tester.n_targets,) , device=UpperCamelCase__ , dtype=torch.long )
A__ : Dict = torch.ones(
self.model_tester.n_targets , 4 , device=UpperCamelCase__ , dtype=torch.float )
labels.append(UpperCamelCase__ )
A__ : Dict = labels
return inputs_dict
def __snake_case ( self ):
A__ : List[Any] = YolosModelTester(self )
A__ : List[str] = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 )
def __snake_case ( self ):
self.config_tester.run_common_tests()
def __snake_case ( self ):
# YOLOS does not use inputs_embeds
pass
def __snake_case ( self ):
A__ , A__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : Any = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A__ : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) )
def __snake_case ( self ):
A__ , A__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : List[str] = model_class(UpperCamelCase__ )
A__ : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ : Optional[int] = [*signature.parameters.keys()]
A__ : Optional[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def __snake_case ( self ):
A__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def __snake_case ( self ):
A__ , A__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
A__ : Tuple = True
# in YOLOS, the seq_len is different
A__ : List[Any] = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
A__ : Any = True
A__ : Optional[int] = False
A__ : Optional[Any] = True
A__ : int = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A__ : List[str] = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
A__ : Optional[int] = outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
A__ : Tuple = True
A__ : Optional[Any] = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A__ : Tuple = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
A__ : Tuple = outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
A__ : List[Any] = len(UpperCamelCase__ )
# Check attention is always last and order is fine
A__ : List[str] = True
A__ : List[Any] = True
A__ : int = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A__ : Tuple = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
A__ : Tuple = 1
self.assertEqual(out_len + added_hidden_states , len(UpperCamelCase__ ) )
A__ : List[str] = outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def __snake_case ( self ):
def check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : str = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A__ : int = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
A__ : Optional[Any] = outputs.hidden_states
A__ : int = getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
# YOLOS has a different seq_length
A__ : Union[str, Any] = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
A__ , A__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : int = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A__ : Optional[int] = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
A__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*UpperCamelCase__ )
@slow
def __snake_case ( self ):
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : Union[str, Any] = YolosModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def prepare_img():
    """Load the shared COCO test fixture image."""
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __snake_case ( self ):
return AutoImageProcessor.from_pretrained('''hustvl/yolos-small''' ) if is_vision_available() else None
@slow
def __snake_case ( self ):
A__ : Tuple = YolosForObjectDetection.from_pretrained('''hustvl/yolos-small''' ).to(UpperCamelCase__ )
A__ : str = self.default_image_processor
A__ : Tuple = prepare_img()
A__ : Tuple = image_processor(images=UpperCamelCase__ , return_tensors='''pt''' ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
A__ : Any = model(inputs.pixel_values )
# verify outputs
A__ : List[Any] = torch.Size((1, 100, 92) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
A__ : Optional[int] = torch.tensor(
[[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] , device=UpperCamelCase__ , )
A__ : Optional[int] = torch.tensor(
[[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] , device=UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , UpperCamelCase__ , atol=1e-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , UpperCamelCase__ , atol=1e-4 ) )
# verify postprocessing
A__ : Dict = image_processor.post_process_object_detection(
UpperCamelCase__ , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
A__ : int = torch.tensor([0.9_9_9_4, 0.9_7_9_0, 0.9_9_6_4, 0.9_9_7_2, 0.9_8_6_1] ).to(UpperCamelCase__ )
A__ : str = [75, 75, 17, 63, 17]
A__ : Tuple = torch.tensor([3_3_5.0_6_0_9, 7_9.3_8_4_8, 3_7_5.4_2_1_6, 1_8_7.2_4_9_5] ).to(UpperCamelCase__ )
self.assertEqual(len(results['''scores'''] ) , 5 )
self.assertTrue(torch.allclose(results['''scores'''] , UpperCamelCase__ , atol=1e-4 ) )
self.assertSequenceEqual(results['''labels'''].tolist() , UpperCamelCase__ )
self.assertTrue(torch.allclose(results['''boxes'''][0, :] , UpperCamelCase__ ) )
| 55
| 1
|
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class FeatureExtractionSavingTestMixin:
    """Shared round-trip tests for feature extractor serialization."""

    feature_extraction_class = None

    def test_feat_extract_to_json_string(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)
        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)
        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
| 55
|
def fibonacci(n: int) -> int:
    """Return the n-th Fibonacci number; returns 0 for n == 1 or non-integer input."""
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """Return the index of the first Fibonacci number containing n digits."""
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    """Project Euler 25: index of the first Fibonacci term to contain n digits."""
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
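
    # Sanity check: fibonacci(12) == 144 is the first Fibonacci number with 3 digits,
    # so fibonacci_digits_index(3) == 12 and solution(3) == 12.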
| 55
| 1
|
import math


def perfect_square(num: int) -> bool:
    """Check if a number is a perfect square using math.sqrt."""
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    """Check if a number is a perfect square via binary search on its root."""
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
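

# Worked trace of perfect_square_binary_search(16):
#   left=0, right=16 -> mid=8  (64 > 16, so right=7)
#   left=0, right=7  -> mid=3  (9 < 16,  so left=4)
#   left=4, right=7  -> mid=5  (25 > 16, so right=4)
#   left=4, right=4  -> mid=4  (16 == 16, return True)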
if __name__ == "__main__":
import doctest
doctest.testmod()
| 55
|
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict[int, dict[int, list[list[int]]]] = {}
def next_term(a_i: list[int], k: int, i: int, n: int) -> tuple[int, int]:
    """Advance the digit array a_i (a(i) = b * 10^k + c), using cached jumps where possible."""
    # ds_b -> digitsum(b), c -> the low k digits
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)
    if sub_memo is not None:
        jumps = sub_memo.get(c)
        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break
            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped
            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]
    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1
    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)
def compute(a_i: list[int], k: int, i: int, n: int) -> tuple[int, int]:
    """Compute sequential terms in-place until index n or the low k digits overflow."""
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])
    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]
    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]
        if addend > 0:
            break
    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i
def add(digits: list[int], k: int, addend: int) -> None:
    """Add `addend` into the digit array, starting at index k, with carry propagation."""
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10
        if addend == 0:
            break
    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)
def solution(n: int = 10**15) -> int:
    """Return a(n) for the digit-sum sequence a(i+1) = a(i) + digitsum(a(i)), a(1) = 1."""
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break
    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n
if __name__ == "__main__":
print(f"""{solution() = }""")
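
    # The sequence being jumped through is a(1) = 1, a(i+1) = a(i) + digitsum(a(i)):
    # 1, 2, 4, 8, 16, 23, 28, 38, 49, 62, 70, 77, 91, 101, ...
    # solution(n) returns a(n); the memoised jumps are what make n = 10**15 tractable.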
| 55
| 1
|
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_, unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = TransfoXLTokenizer
_lowerCAmelCase = False
_lowerCAmelCase = False
def __snake_case ( self ):
super().setUp()
A__ : List[Any] = [
'''<unk>''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''unwanted''',
'''wa''',
'''un''',
'''running''',
''',''',
'''low''',
'''l''',
]
A__ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __snake_case ( self , **UpperCamelCase__ ):
A__ : List[Any] = True
return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def __snake_case ( self , UpperCamelCase__ ):
A__ : Optional[int] = '''<unk> UNwanted , running'''
A__ : Optional[Any] = '''<unk> unwanted, running'''
return input_text, output_text
def __snake_case ( self ):
A__ : Optional[Any] = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=UpperCamelCase__ )
A__ : Union[str, Any] = tokenizer.tokenize('''<unk> UNwanted , running''' )
self.assertListEqual(UpperCamelCase__ , ['''<unk>''', '''unwanted''', ''',''', '''running'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [0, 4, 8, 7] )
def __snake_case ( self ):
A__ : int = TransfoXLTokenizer(lower_case=UpperCamelCase__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
def __snake_case ( self ):
A__ : List[Any] = TransfoXLTokenizer(lower_case=UpperCamelCase__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __snake_case ( self ):
A__ : Tuple = TransfoXLTokenizer(lower_case=UpperCamelCase__ )
A__ : List[str] = '''Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?'''
A__ : str = [
'''Hello''',
'''(''',
'''bracket''',
''')''',
'''and''',
'''side''',
'''@-@''',
'''scrolled''',
'''[''',
'''and''',
''']''',
'''Henry''',
'''\'s''',
'''$''',
'''5''',
'''@,@''',
'''000''',
'''with''',
'''3''',
'''@.@''',
'''34''',
'''m''',
'''.''',
'''What''',
'''\'s''',
'''up''',
'''!''',
'''?''',
]
self.assertListEqual(tokenizer.tokenize(UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(tokenizer.convert_tokens_to_string(UpperCamelCase__ ) , UpperCamelCase__ )
def __snake_case ( self ):
A__ : str = self.get_tokenizer()
A__ : int = len(UpperCamelCase__ )
tokenizer.add_tokens(['''new1''', '''new2'''] )
tokenizer.move_added_token('''new1''' , 1 )
# Check that moved token is not copied (duplicate)
self.assertEqual(len(UpperCamelCase__ ) , original_len + 2 )
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode('''new1''' ) , [1] )
self.assertEqual(tokenizer.decode([1] ) , '''new1''' )
| 55
|
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    """Read a boolean flag from the environment, falling back to `default` when unset."""
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value
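
# e.g. with RUN_SLOW=yes exported in the shell, parse_flag_from_env("RUN_SLOW", default=False) is True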
_SCREAMING_SNAKE_CASE : Union[str, Any] = parse_flag_from_env('RUN_SLOW', default=False)
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] ) -> Any:
"""simple docstring"""
return unittest.skip('''Test was skipped''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Tuple ) -> Union[str, Any]:
"""simple docstring"""
return unittest.skipUnless(_run_slow_tests , '''test is slow''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : str ) -> int:
"""simple docstring"""
return unittest.skipUnless(not torch.cuda.is_available() , '''test requires only a CPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] ) -> Tuple:
"""simple docstring"""
return unittest.skipUnless(torch.cuda.is_available() , '''test requires a GPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Dict ) -> List[str]:
"""simple docstring"""
return unittest.skipUnless(is_xpu_available() , '''test requires a XPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Dict ) -> Any:
"""simple docstring"""
return unittest.skipUnless(is_mps_available() , '''test requires a `mps` backend support in `torch`''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Optional[Any]:
"""simple docstring"""
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , '''test requires the Hugging Face suite''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Tuple ) -> Tuple:
"""simple docstring"""
return unittest.skipUnless(is_bnb_available() , '''test requires the bitsandbytes library''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] ) -> List[Any]:
"""simple docstring"""
return unittest.skipUnless(is_tpu_available() , '''test requires TPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Tuple:
"""simple docstring"""
return unittest.skipUnless(torch.cuda.device_count() == 1 , '''test requires a GPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Dict:
"""simple docstring"""
return unittest.skipUnless(torch.xpu.device_count() == 1 , '''test requires a XPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any ) -> str:
"""simple docstring"""
return unittest.skipUnless(torch.cuda.device_count() > 1 , '''test requires multiple GPUs''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Any:
"""simple docstring"""
return unittest.skipUnless(torch.xpu.device_count() > 1 , '''test requires multiple XPUs''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] ) -> int:
"""simple docstring"""
return unittest.skipUnless(is_safetensors_available() , '''test requires safetensors''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] ) -> Optional[Any]:
"""simple docstring"""
return unittest.skipUnless(is_deepspeed_available() , '''test requires DeepSpeed''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any ) -> List[Any]:
"""simple docstring"""
return unittest.skipUnless(is_torch_version('''>=''' , '''1.12.0''' ) , '''test requires torch version >= 1.12.0''' )(__UpperCamelCase )
def require_torch_min_version(test_case=None, version=None):
    # usable both as @require_torch_min_version(version="...") and as a plain decorator
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
return unittest.skipUnless(is_tensorboard_available() , '''test requires Tensorboard''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any ) -> Tuple:
"""simple docstring"""
return unittest.skipUnless(is_wandb_available() , '''test requires wandb''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Tuple ) -> Any:
"""simple docstring"""
return unittest.skipUnless(is_comet_ml_available() , '''test requires comet_ml''' )(__UpperCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
return unittest.skipUnless(
_atleast_one_tracker_available , '''test requires at least one tracker to be available and for `comet_ml` to not be installed''' , )(__UpperCamelCase )
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = True
@classmethod
def __snake_case ( cls ):
A__ : Tuple = tempfile.mkdtemp()
@classmethod
def __snake_case ( cls ):
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def __snake_case ( self ):
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob('''**/*''' ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(UpperCamelCase__ )
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self ):
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
    def __snake_case ( self , UpperCamelCase__ ):
        self.mocks = UpperCamelCase__ if isinstance(UpperCamelCase__ , (tuple, list) ) else [UpperCamelCase__]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop )
def are_the_same_tensors(tensor) -> bool:
    """Gather `tensor` across processes and check every rank holds the same values."""
    state = AcceleratorState()
    tensors = tensor[None].clone().to(state.device)
    tensors = gather(tensors).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    # forward each line from an async stream to `callback` until EOF
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env,
    )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )
    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )
    return result
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
pass
def run_command(command, return_stdout=False):
    """Run `command` with subprocess.check_output and optionally return its decoded stdout."""
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
        ) from e
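
# Usage sketch for the helper above (POSIX shell assumed):
#   run_command(["echo", "hello"], return_stdout=True)  # -> "hello\n"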
| 55
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE : int = {
'configuration_bert': ['BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BertConfig', 'BertOnnxConfig'],
'tokenization_bert': ['BasicTokenizer', 'BertTokenizer', 'WordpieceTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Optional[Any] = ['BertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Union[str, Any] = [
'BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BertForMaskedLM',
'BertForMultipleChoice',
'BertForNextSentencePrediction',
'BertForPreTraining',
'BertForQuestionAnswering',
'BertForSequenceClassification',
'BertForTokenClassification',
'BertLayer',
'BertLMHeadModel',
'BertModel',
'BertPreTrainedModel',
'load_tf_weights_in_bert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Tuple = [
'TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBertEmbeddings',
'TFBertForMaskedLM',
'TFBertForMultipleChoice',
'TFBertForNextSentencePrediction',
'TFBertForPreTraining',
'TFBertForQuestionAnswering',
'TFBertForSequenceClassification',
'TFBertForTokenClassification',
'TFBertLMHeadModel',
'TFBertMainLayer',
'TFBertModel',
'TFBertPreTrainedModel',
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Dict = ['TFBertTokenizer']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Optional[int] = [
'FlaxBertForCausalLM',
'FlaxBertForMaskedLM',
'FlaxBertForMultipleChoice',
'FlaxBertForNextSentencePrediction',
'FlaxBertForPreTraining',
'FlaxBertForQuestionAnswering',
'FlaxBertForSequenceClassification',
'FlaxBertForTokenClassification',
'FlaxBertModel',
'FlaxBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import numpy as np
SQUARE = [
['a', 'b', 'c', 'd', 'e'],
['f', 'g', 'h', 'i', 'k'],
['l', 'm', 'n', 'o', 'p'],
['q', 'r', 's', 't', 'u'],
['v', 'w', 'x', 'y', 'z'],
]
class BifidCipher:
    """Bifid cipher: combines the Polybius square above with a transposition step."""

    def __init__(self) -> None:
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        """Return the 1-indexed (row, column) pair of `letter` in the Polybius square."""
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        """Return the letter at the 1-indexed position (index1, index2) of the square."""
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message: str) -> str:
        """Encode `message`; spaces are dropped and "j" is folded into "i"."""
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")

        # Write the coordinates of each letter column-wise into a 2 x n grid ...
        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        # ... then read the grid row-wise and turn consecutive pairs back into letters.
        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter

        return encoded_message

    def decode(self, message: str) -> str:
        """Decode a message produced by `encode` by inverting the transposition."""
        message = message.lower()
        message = message.replace(" ", "")

        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]

        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter

        return decoded_message
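# A short demo of the round trip (the sample message is illustrative): encoding
# normalizes the input (spaces removed, "j" -> "i"), so decoding recovers the
# normalized message rather than the raw one.
if __name__ == "__main__":
    cipher = BifidCipher()
    secret = cipher.encode("testmessage")
    assert cipher.decode(secret) == "testmessage"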
import os
def solution() -> int:
    """Find the maximum total from top to bottom of the triangle in triangle.txt,
    accumulating row by row with dynamic programming (Project Euler's triangle problem)."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, "triangle.txt")

    with open(triangle_path) as f:
        triangle = f.readlines()

    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" "):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)

    # Each entry accumulates the best path total that can reach it from above.
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])
if __name__ == "__main__":
print(solution())
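# Illustrative check of the recurrence above on the small triangle from the
# problem statement (not read from triangle.txt):
#      3
#     7 4
#    2 4 6
#   8 5 9 3
# accumulating row by row gives a maximum path total of 23 (3 + 7 + 4 + 9).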
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}


class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Builds the input ids for a conversation, appending eos after every turn."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
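# Hedged usage sketch (requires network access to the Hugging Face Hub; the
# checkpoint name comes from PRETRAINED_VOCAB_FILES_MAP above):
#
#     tokenizer = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
#     ids = tokenizer("Hello world").input_ids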
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather((predictions, batch["labels"]))
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
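# As the "New Code" comments above note, the manual truncation of the last
# distributed eval batch can be replaced by `Accelerator.gather_for_metrics`,
# which drops the duplicated samples itself. A minimal sketch with the same
# names as above:
#
#     predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
#     metric.add_batch(predictions=predictions, references=references)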
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class EsmFoldModelTester:
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__=13 , UpperCamelCase__=7 , UpperCamelCase__=False , UpperCamelCase__=True , UpperCamelCase__=False , UpperCamelCase__=False , UpperCamelCase__=19 , UpperCamelCase__=32 , UpperCamelCase__=5 , UpperCamelCase__=4 , UpperCamelCase__=37 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=512 , UpperCamelCase__=16 , UpperCamelCase__=2 , UpperCamelCase__=0.0_2 , UpperCamelCase__=3 , UpperCamelCase__=4 , UpperCamelCase__=None , ):
A__ : Optional[Any] = parent
A__ : Any = batch_size
A__ : Union[str, Any] = seq_length
A__ : Dict = is_training
A__ : int = use_input_mask
A__ : List[Any] = use_token_type_ids
A__ : Optional[Any] = use_labels
A__ : int = vocab_size
A__ : Optional[int] = hidden_size
A__ : Tuple = num_hidden_layers
A__ : Optional[Any] = num_attention_heads
A__ : int = intermediate_size
A__ : List[Any] = hidden_act
A__ : Dict = hidden_dropout_prob
A__ : Union[str, Any] = attention_probs_dropout_prob
A__ : Union[str, Any] = max_position_embeddings
A__ : Optional[Any] = type_vocab_size
A__ : Dict = type_sequence_label_size
A__ : Optional[int] = initializer_range
A__ : Union[str, Any] = num_labels
A__ : List[Any] = num_choices
A__ : Any = scope
def __snake_case ( self ):
A__ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ : Tuple = None
if self.use_input_mask:
A__ : int = random_attention_mask([self.batch_size, self.seq_length] )
A__ : Optional[int] = None
A__ : Optional[int] = None
A__ : Optional[int] = None
if self.use_labels:
A__ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A__ : int = ids_tensor([self.batch_size] , self.num_choices )
A__ : List[Any] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __snake_case ( self ):
A__ : Optional[int] = EsmConfig(
vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=UpperCamelCase__ , esmfold_config={'''trunk''': {'''num_blocks''': 2}, '''fp16_esm''': False} , )
return config
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : Union[str, Any] = EsmForProteinFolding(config=UpperCamelCase__ ).float()
model.to(UpperCamelCase__ )
model.eval()
A__ : Union[str, Any] = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ )
A__ : Any = model(UpperCamelCase__ )
A__ : Any = model(UpperCamelCase__ )
self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3) )
self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = False
_lowerCAmelCase = (EsmForProteinFolding,) if is_torch_available() else ()
_lowerCAmelCase = ()
_lowerCAmelCase = {} if is_torch_available() else {}
_lowerCAmelCase = False
def __snake_case ( self ):
A__ : Optional[Any] = EsmFoldModelTester(self )
A__ : List[str] = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 )
def __snake_case ( self ):
self.config_tester.run_common_tests()
def __snake_case ( self ):
A__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
@unittest.skip('''Does not support attention outputs''' )
def __snake_case ( self ):
pass
@unittest.skip
def __snake_case ( self ):
pass
@unittest.skip('''Esm does not support embedding resizing''' )
def __snake_case ( self ):
pass
@unittest.skip('''Esm does not support embedding resizing''' )
def __snake_case ( self ):
pass
@unittest.skip('''ESMFold does not support passing input embeds!''' )
def __snake_case ( self ):
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def __snake_case ( self ):
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def __snake_case ( self ):
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def __snake_case ( self ):
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def __snake_case ( self ):
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def __snake_case ( self ):
pass
@unittest.skip('''ESMFold does not output hidden states in the normal way.''' )
def __snake_case ( self ):
pass
@unittest.skip('''ESMfold does not output hidden states in the normal way.''' )
def __snake_case ( self ):
pass
@unittest.skip('''ESMFold only has one output format.''' )
def __snake_case ( self ):
pass
@unittest.skip('''This test doesn\'t work for ESMFold and doesn\'t test core functionality''' )
def __snake_case ( self ):
pass
@unittest.skip('''ESMFold does not support input chunking.''' )
def __snake_case ( self ):
pass
@unittest.skip('''ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.''' )
def __snake_case ( self ):
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def __snake_case ( self ):
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def __snake_case ( self ):
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def __snake_case ( self ):
pass
@unittest.skip('''ESMFold doesn\'t support data parallel.''' )
def __snake_case ( self ):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __snake_case ( self ):
pass
@require_torch
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
@slow
def __snake_case ( self ):
A__ : Any = EsmForProteinFolding.from_pretrained('''facebook/esmfold_v1''' ).float()
model.eval()
A__ : List[Any] = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
A__ : List[Any] = model(UpperCamelCase__ )['''positions''']
A__ : Optional[int] = torch.tensor([2.5_8_2_8, 0.7_9_9_3, -1_0.9_3_3_4] , dtype=torch.floataa )
self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , UpperCamelCase__ , atol=1e-4 ) )
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool(PipelineTool):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan

    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")

            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
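# Hedged usage sketch (downloads the SpeechT5 checkpoints and the x-vector
# dataset on first use; the sentence is illustrative):
#
#     tool = TextToSpeechTool()
#     tool.setup()
#     audio = tool("Hello, my dog is cute.")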
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : List[Any] = {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/config.json',
'umberto-commoncrawl-cased-v1': (
'https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'
),
'umberto-wikipedia-uncased-v1': (
'https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'
),
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = "camembert"
def __init__( self , UpperCamelCase__=3_0522 , UpperCamelCase__=768 , UpperCamelCase__=12 , UpperCamelCase__=12 , UpperCamelCase__=3072 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=512 , UpperCamelCase__=2 , UpperCamelCase__=0.0_2 , UpperCamelCase__=1e-12 , UpperCamelCase__=1 , UpperCamelCase__=0 , UpperCamelCase__=2 , UpperCamelCase__="absolute" , UpperCamelCase__=True , UpperCamelCase__=None , **UpperCamelCase__ , ):
super().__init__(pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ )
A__ : Dict = vocab_size
A__ : Dict = hidden_size
A__ : Dict = num_hidden_layers
A__ : str = num_attention_heads
A__ : List[Any] = hidden_act
A__ : Dict = intermediate_size
A__ : str = hidden_dropout_prob
A__ : Dict = attention_probs_dropout_prob
A__ : Union[str, Any] = max_position_embeddings
A__ : int = type_vocab_size
A__ : List[str] = initializer_range
A__ : Optional[Any] = layer_norm_eps
A__ : Union[str, Any] = position_embedding_type
A__ : Optional[Any] = use_cache
A__ : str = classifier_dropout
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
    @property
    def inputs(self):
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
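# Hedged sketch of how the ONNX config above is consumed (class names follow
# the usual transformers naming; in this file they appear under placeholder
# names):
#
#     config = CamembertConfig()
#     onnx_config = CamembertOnnxConfig(config, task="sequence-classification")
#     print(onnx_config.inputs)  # OrderedDict with dynamic batch/sequence axes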
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}

logger = logging.get_logger(__name__)
class PegasusTokenizer(PreTrainedTokenizer):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self , UpperCamelCase__ , UpperCamelCase__="<pad>" , UpperCamelCase__="</s>" , UpperCamelCase__="<unk>" , UpperCamelCase__="<mask_2>" , UpperCamelCase__="<mask_1>" , UpperCamelCase__=None , UpperCamelCase__=103 , UpperCamelCase__ = None , **UpperCamelCase__ , ):
A__ : Optional[Any] = offset
if additional_special_tokens is not None:
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
raise TypeError(
F"additional_special_tokens should be of type {type(UpperCamelCase__ )}, but is"
F" {type(UpperCamelCase__ )}" )
A__ : Any = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
F"<unk_{i}>" for i in range(len(UpperCamelCase__ ) , self.offset - 1 )
]
if len(set(UpperCamelCase__ ) ) != len(UpperCamelCase__ ):
raise ValueError(
'''Please make sure that the provided additional_special_tokens do not contain an incorrectly'''
F" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}." )
A__ : Optional[Any] = additional_special_tokens_extended
else:
A__ : Dict = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [F"<unk_{i}>" for i in range(2 , self.offset )]
A__ : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , mask_token_sent=UpperCamelCase__ , offset=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase__ , )
A__ : List[Any] = mask_token_sent
A__ : Any = vocab_file
A__ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCamelCase__ )
# add special tokens to encoder dict
A__ : Dict[int, str] = {
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
} )
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} )
A__ : Dict[str, int] = {v: k for k, v in self.encoder.items()}
@property
def __snake_case ( self ):
return len(self.sp_model ) + self.offset
def __snake_case ( self ):
A__ : List[str] = {self.convert_ids_to_tokens(UpperCamelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
def __snake_case ( self , UpperCamelCase__ ):
return self.sp_model.encode(UpperCamelCase__ , out_type=UpperCamelCase__ )
def __snake_case ( self , UpperCamelCase__ ):
if token in self.decoder:
return self.decoder[token]
elif token in self.added_tokens_decoder:
return self.added_tokens_decoder[token]
A__ : str = self.sp_model.piece_to_id(UpperCamelCase__ )
return sp_id + self.offset
def __snake_case ( self , UpperCamelCase__ ):
if index in self.encoder:
return self.encoder[index]
elif index in self.added_tokens_encoder:
return self.added_tokens_encoder[index]
else:
A__ : Dict = self.sp_model.IdToPiece(index - self.offset )
return token
def __snake_case ( self , UpperCamelCase__ ):
A__ : Optional[int] = []
A__ : Union[str, Any] = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(UpperCamelCase__ ) + token
A__ : int = []
else:
current_sub_tokens.append(UpperCamelCase__ )
out_string += self.sp_model.decode(UpperCamelCase__ )
return out_string.strip()
def __snake_case ( self , UpperCamelCase__=False ):
return 1
def __snake_case ( self , UpperCamelCase__ ):
A__ : Any = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
return [1 if x in all_special_ids else 0 for x in seq]
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = False ):
if already_has_special_tokens:
return self._special_token_mask(UpperCamelCase__ )
elif token_ids_a is None:
return self._special_token_mask(UpperCamelCase__ ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__=None ):
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ = None ):
if not os.path.isdir(UpperCamelCase__ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
A__ : Dict = os.path.join(
UpperCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase__ , '''wb''' ) as fi:
A__ : List[Any] = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase__ )
return (out_vocab_file,)
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : int = {
'SenseTime/deformable-detr': 'https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = "deformable_detr"
_lowerCAmelCase = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self , UpperCamelCase__=True , UpperCamelCase__=None , UpperCamelCase__=3 , UpperCamelCase__=300 , UpperCamelCase__=1024 , UpperCamelCase__=6 , UpperCamelCase__=1024 , UpperCamelCase__=8 , UpperCamelCase__=6 , UpperCamelCase__=1024 , UpperCamelCase__=8 , UpperCamelCase__=0.0 , UpperCamelCase__=True , UpperCamelCase__="relu" , UpperCamelCase__=256 , UpperCamelCase__=0.1 , UpperCamelCase__=0.0 , UpperCamelCase__=0.0 , UpperCamelCase__=0.0_2 , UpperCamelCase__=1.0 , UpperCamelCase__=True , UpperCamelCase__=False , UpperCamelCase__="sine" , UpperCamelCase__="resnet50" , UpperCamelCase__=True , UpperCamelCase__=False , UpperCamelCase__=4 , UpperCamelCase__=4 , UpperCamelCase__=4 , UpperCamelCase__=False , UpperCamelCase__=300 , UpperCamelCase__=False , UpperCamelCase__=1 , UpperCamelCase__=5 , UpperCamelCase__=2 , UpperCamelCase__=1 , UpperCamelCase__=1 , UpperCamelCase__=5 , UpperCamelCase__=2 , UpperCamelCase__=0.1 , UpperCamelCase__=0.2_5 , UpperCamelCase__=False , **UpperCamelCase__ , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
A__ : int = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A__ : Union[str, Any] = backbone_config.get('''model_type''' )
A__ : Union[str, Any] = CONFIG_MAPPING[backbone_model_type]
A__ : Optional[int] = config_class.from_dict(UpperCamelCase__ )
A__ : Tuple = use_timm_backbone
A__ : int = backbone_config
A__ : List[Any] = num_channels
A__ : List[Any] = num_queries
A__ : str = max_position_embeddings
A__ : Tuple = d_model
A__ : int = encoder_ffn_dim
A__ : Union[str, Any] = encoder_layers
A__ : Optional[Any] = encoder_attention_heads
A__ : List[Any] = decoder_ffn_dim
A__ : Tuple = decoder_layers
A__ : Optional[Any] = decoder_attention_heads
A__ : List[str] = dropout
A__ : str = attention_dropout
A__ : List[Any] = activation_dropout
A__ : Any = activation_function
A__ : Optional[Any] = init_std
A__ : Union[str, Any] = init_xavier_std
A__ : Union[str, Any] = encoder_layerdrop
A__ : Optional[int] = auxiliary_loss
A__ : str = position_embedding_type
A__ : List[Any] = backbone
A__ : Optional[Any] = use_pretrained_backbone
A__ : Any = dilation
# deformable attributes
A__ : List[Any] = num_feature_levels
A__ : List[str] = encoder_n_points
A__ : int = decoder_n_points
A__ : List[Any] = two_stage
A__ : Dict = two_stage_num_proposals
A__ : Optional[int] = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError('''If two_stage is True, with_box_refine must be True.''' )
# Hungarian matcher
A__ : List[str] = class_cost
A__ : List[Any] = bbox_cost
A__ : Any = giou_cost
# Loss coefficients
A__ : List[str] = mask_loss_coefficient
A__ : Union[str, Any] = dice_loss_coefficient
A__ : List[Any] = bbox_loss_coefficient
A__ : Tuple = giou_loss_coefficient
A__ : Optional[Any] = eos_coefficient
A__ : List[Any] = focal_alpha
A__ : List[str] = disable_custom_kernels
super().__init__(is_encoder_decoder=UpperCamelCase__ , **UpperCamelCase__ )
@property
def __snake_case ( self ):
return self.encoder_attention_heads
@property
def __snake_case ( self ):
return self.d_model
def __snake_case ( self ):
A__ : List[str] = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
A__ : Tuple = self.backbone_config.to_dict()
A__ : Optional[int] = self.__class__.model_type
return output
import fire

from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    """Save a randomly initialized version of a model, using a pretrained config."""
    cfg = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(cfg)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model


if __name__ == "__main__":
    fire.Fire(save_randomly_initialized_version)
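# Example invocation through fire (model name, output dir and config override
# are illustrative):
#
#     python save_randomly_initialized_version.py t5-small ./t5-small-random --d_model 64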
def count_inversions_bf(arr):
    """Count inversions by brute force: compare every pair of elements."""
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions


def count_inversions_recursive(arr):
    """Count inversions by divide and conquer; returns (sorted_arr, count)."""
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]
    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)
    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions


def _count_cross_inversions(p, q):
    """Merge two sorted lists, counting the inversions that cross the halves."""
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1

    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion


def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)

    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)


if __name__ == "__main__":
    main()
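# Note on the two implementations above: count_inversions_bf compares every
# pair in O(n^2), while count_inversions_recursive piggybacks on merge sort
# for O(n log n). Illustrative check: count_inversions_bf([3, 2, 1]) == 3,
# from the pairs (3, 2), (3, 1) and (2, 1).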
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
_lowerCAmelCase = (
{
"feature-extraction": TFMobileBertModel,
"fill-mask": TFMobileBertForMaskedLM,
"question-answering": TFMobileBertForQuestionAnswering,
"text-classification": TFMobileBertForSequenceClassification,
"token-classification": TFMobileBertForTokenClassification,
"zero-shot": TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_lowerCAmelCase = False
_lowerCAmelCase = False
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=False ):
A__ : List[str] = super()._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
if return_labels:
if model_class in get_values(UpperCamelCase__ ):
A__ : Tuple = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
return inputs_dict
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__=13 , UpperCamelCase__=7 , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=99 , UpperCamelCase__=32 , UpperCamelCase__=32 , UpperCamelCase__=2 , UpperCamelCase__=4 , UpperCamelCase__=37 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=512 , UpperCamelCase__=16 , UpperCamelCase__=2 , UpperCamelCase__=0.0_2 , UpperCamelCase__=3 , UpperCamelCase__=4 , UpperCamelCase__=None , ):
A__ : Tuple = parent
A__ : Tuple = batch_size
A__ : Tuple = seq_length
A__ : Union[str, Any] = is_training
A__ : Any = use_input_mask
A__ : Union[str, Any] = use_token_type_ids
A__ : Optional[Any] = use_labels
A__ : Dict = vocab_size
A__ : Union[str, Any] = hidden_size
A__ : str = num_hidden_layers
A__ : List[str] = num_attention_heads
A__ : List[str] = intermediate_size
A__ : Union[str, Any] = hidden_act
A__ : int = hidden_dropout_prob
A__ : List[str] = attention_probs_dropout_prob
A__ : Tuple = max_position_embeddings
A__ : Union[str, Any] = type_vocab_size
A__ : List[str] = type_sequence_label_size
A__ : Union[str, Any] = initializer_range
A__ : List[Any] = num_labels
A__ : List[str] = num_choices
A__ : Dict = scope
A__ : List[Any] = embedding_size
def __snake_case ( self ):
A__ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ : Optional[Any] = None
if self.use_input_mask:
A__ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
A__ : Optional[int] = None
if self.use_token_type_ids:
A__ : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A__ : Optional[int] = None
A__ : Union[str, Any] = None
A__ : Optional[int] = None
if self.use_labels:
A__ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A__ : Any = ids_tensor([self.batch_size] , self.num_choices )
A__ : Dict = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : str = TFMobileBertModel(config=UpperCamelCase__ )
A__ : Optional[int] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A__ : List[str] = model(UpperCamelCase__ )
A__ : List[Any] = [input_ids, input_mask]
A__ : Optional[Any] = model(UpperCamelCase__ )
A__ : Tuple = model(UpperCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : int = TFMobileBertForMaskedLM(config=UpperCamelCase__ )
A__ : Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A__ : List[str] = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : Union[str, Any] = TFMobileBertForNextSentencePrediction(config=UpperCamelCase__ )
A__ : Union[str, Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A__ : Dict = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : int = TFMobileBertForPreTraining(config=UpperCamelCase__ )
A__ : List[str] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A__ : int = model(UpperCamelCase__ )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : str = self.num_labels
A__ : int = TFMobileBertForSequenceClassification(config=UpperCamelCase__ )
A__ : List[str] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A__ : Union[str, Any] = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : Optional[int] = self.num_choices
A__ : str = TFMobileBertForMultipleChoice(config=UpperCamelCase__ )
A__ : Any = tf.tile(tf.expand_dims(UpperCamelCase__ , 1 ) , (1, self.num_choices, 1) )
A__ : Dict = tf.tile(tf.expand_dims(UpperCamelCase__ , 1 ) , (1, self.num_choices, 1) )
A__ : List[Any] = tf.tile(tf.expand_dims(UpperCamelCase__ , 1 ) , (1, self.num_choices, 1) )
A__ : Tuple = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
A__ : Optional[Any] = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : Tuple = self.num_labels
A__ : Optional[int] = TFMobileBertForTokenClassification(config=UpperCamelCase__ )
A__ : int = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A__ : Optional[Any] = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : Dict = TFMobileBertForQuestionAnswering(config=UpperCamelCase__ )
A__ : str = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A__ : Optional[Any] = model(UpperCamelCase__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
def __snake_case ( self ):
A__ : str = TFMobileBertModelTest.TFMobileBertModelTester(self )
A__ : Tuple = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 )
def __snake_case ( self ):
self.config_tester.run_common_tests()
def __snake_case ( self ):
A__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*UpperCamelCase__ )
def __snake_case ( self ):
A__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*UpperCamelCase__ )
def __snake_case ( self ):
A__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*UpperCamelCase__ )
def __snake_case ( self ):
A__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*UpperCamelCase__ )
def __snake_case ( self ):
A__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*UpperCamelCase__ )
def __snake_case ( self ):
A__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*UpperCamelCase__ )
def __snake_case ( self ):
A__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*UpperCamelCase__ )
def __snake_case ( self ):
A__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*UpperCamelCase__ )
@slow
def __snake_case ( self ):
# for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["google/mobilebert-uncased"]:
A__ : int = TFMobileBertModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
@require_tf
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def __snake_case ( self ):
A__ : Union[str, Any] = TFMobileBertForPreTraining.from_pretrained('''google/mobilebert-uncased''' )
A__ : Union[str, Any] = tf.constant([[0, 1, 2, 3, 4, 5]] )
A__ : List[str] = model(UpperCamelCase__ )[0]
A__ : int = [1, 6, 3_0522]
self.assertEqual(output.shape , UpperCamelCase__ )
A__ : Union[str, Any] = tf.constant(
[
[
[-4.5_9_1_9_5_4_7, -9.2_4_8_2_9_5, -9.6_4_5_2_5_6],
[-6.7_3_0_6_1_7_5, -6.4_4_0_2_8_4, -6.6_0_5_2_8_3_7],
[-7.2_7_4_3_5_0_6, -6.7_8_4_7_9_1_5, -6.0_2_4_6_7_3],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , UpperCamelCase__ , atol=1e-4 )
from PIL import Image
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Image , __UpperCamelCase : float ) -> Image:
"""simple docstring"""
def brightness(__UpperCamelCase : int ) -> float:
return 1_28 + level + (c - 1_28)
if not -2_5_5.0 <= level <= 2_5_5.0:
raise ValueError('''level must be between -255.0 (black) and 255.0 (white)''' )
return img.point(__UpperCamelCase )
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change brightness to 100
        _SCREAMING_SNAKE_CASE : Dict = SCREAMING_SNAKE_CASE(img, 1_0_0)
        _SCREAMING_SNAKE_CASE.save('image_data/lena_brightness.png', format='png')
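# Image.point maps the callable over every band value in the 0-255 range; a minimal
# sketch of the same brightness shift with explicit clamping (the helper name
# `shift_brightness` is illustrative, not part of the code above):
def shift_brightness(value: int, level: float = 100.0) -> int:
    return int(max(0, min(255, value + level)))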
| 55
| 1
|
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Union[str, Any] ) -> Dict:
"""simple docstring"""
A__ : Optional[int] = []
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight",
F"stage{idx}.patch_embed.proj.weight",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias",
F"stage{idx}.patch_embed.proj.bias",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight",
F"stage{idx}.patch_embed.norm.weight",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias",
F"stage{idx}.patch_embed.norm.bias",
) )
return embed
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[int] , __UpperCamelCase : Any ) -> Any:
"""simple docstring"""
A__ : Tuple = []
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_q.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_q.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_k.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_k.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_v.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_v.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight",
F"stage{idx}.blocks.{cnt}.attn.proj.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias",
F"stage{idx}.blocks.{cnt}.attn.proj.bias",
) )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight", F"stage{idx}.blocks.{cnt}.mlp.fc1.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias", F"stage{idx}.blocks.{cnt}.mlp.fc1.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight", F"stage{idx}.blocks.{cnt}.mlp.fc2.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias", F"stage{idx}.blocks.{cnt}.mlp.fc2.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight", F"stage{idx}.blocks.{cnt}.norm1.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias", F"stage{idx}.blocks.{cnt}.norm1.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight", F"stage{idx}.blocks.{cnt}.norm2.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias", F"stage{idx}.blocks.{cnt}.norm2.bias") )
return attention_weights
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : str ) -> Optional[Any]:
"""simple docstring"""
A__ : Optional[Any] = []
token.append((F"cvt.encoder.stages.{idx}.cls_token", '''stage2.cls_token''') )
return token
def SCREAMING_SNAKE_CASE ( ) -> Tuple:
"""simple docstring"""
A__ : Optional[int] = []
head.append(('''layernorm.weight''', '''norm.weight''') )
head.append(('''layernorm.bias''', '''norm.bias''') )
head.append(('''classifier.weight''', '''head.weight''') )
head.append(('''classifier.bias''', '''head.bias''') )
return head
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Dict , __UpperCamelCase : Dict , __UpperCamelCase : str , __UpperCamelCase : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
A__ : List[Any] = '''imagenet-1k-id2label.json'''
A__ : str = 10_00
A__ : str = '''huggingface/label-files'''
A__ : Union[str, Any] = num_labels
A__ : Tuple = json.load(open(cached_download(hf_hub_url(__UpperCamelCase , __UpperCamelCase , repo_type='''dataset''' ) ) , '''r''' ) )
    A__ : List[str] = {int(k): v for k, v in idalabel.items()}
    A__ : int = idalabel
    A__ : Optional[int] = {v: k for k, v in idalabel.items()}
    A__ : int = CvtConfig(num_labels=__UpperCamelCase , id2label=__UpperCamelCase , label2id=__UpperCamelCase )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "13":
A__ : Any = [1, 2, 10]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "21":
A__ : Tuple = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
else:
A__ : List[str] = [2, 2, 20]
A__ : Optional[int] = [3, 12, 16]
A__ : List[str] = [1_92, 7_68, 10_24]
A__ : Dict = CvtForImageClassification(__UpperCamelCase )
A__ : List[Any] = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' )
A__ : Union[str, Any] = image_size
A__ : List[Any] = torch.load(__UpperCamelCase , map_location=torch.device('''cpu''' ) )
A__ : Any = OrderedDict()
A__ : Dict = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
A__ : int = list_of_state_dict + cls_token(__UpperCamelCase )
A__ : List[str] = list_of_state_dict + embeddings(__UpperCamelCase )
for cnt in range(config.depth[idx] ):
A__ : int = list_of_state_dict + attention(__UpperCamelCase , __UpperCamelCase )
A__ : Dict = list_of_state_dict + final()
for gg in list_of_state_dict:
        print(gg )
for i in range(len(__UpperCamelCase ) ):
A__ : Any = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(__UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
image_processor.save_pretrained(__UpperCamelCase )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Tuple = argparse.ArgumentParser()
parser.add_argument(
'--cvt_model',
default='cvt-w24',
type=str,
help='Name of the cvt model you\'d like to convert.',
)
parser.add_argument(
'--image_size',
default=3_8_4,
type=int,
help='Input Image Size',
)
parser.add_argument(
'--cvt_file_name',
default=r'cvtmodels\CvT-w24-384x384-IN-22k.pth',
type=str,
        help='Path to the original CvT checkpoint (.pth) file.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
_SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
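# A minimal sketch of how the (new_key, old_key) rename pairs built by the helpers above
# are consumed: copy each tensor from the original checkpoint into a fresh state dict
# under its HuggingFace name. `rename_pairs` and `old_state_dict` are illustrative names,
# not variables from the script above.
from collections import OrderedDict

def apply_renames(old_state_dict, rename_pairs):
    new_state_dict = OrderedDict()
    for new_key, old_key in rename_pairs:
        new_state_dict[new_key] = old_state_dict[old_key]
    return new_state_dict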
| 55
|
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class UpperCamelCase__ :
'''simple docstring'''
_lowerCAmelCase = None
def __snake_case ( self ):
A__ : Dict = self.feature_extraction_class(**self.feat_extract_dict )
A__ : Tuple = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , UpperCamelCase__ )
def __snake_case ( self ):
A__ : Any = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Any = os.path.join(UpperCamelCase__ , '''feat_extract.json''' )
feat_extract_first.to_json_file(UpperCamelCase__ )
A__ : Dict = self.feature_extraction_class.from_json_file(UpperCamelCase__ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def __snake_case ( self ):
A__ : Any = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Any = feat_extract_first.save_pretrained(UpperCamelCase__ )[0]
check_json_file_has_correct_format(UpperCamelCase__ )
A__ : Optional[int] = self.feature_extraction_class.from_pretrained(UpperCamelCase__ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def __snake_case ( self ):
A__ : str = self.feature_extraction_class()
self.assertIsNotNone(UpperCamelCase__ )
| 55
| 1
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__=7 , UpperCamelCase__=3 , UpperCamelCase__=18 , UpperCamelCase__=30 , UpperCamelCase__=400 , UpperCamelCase__=True , UpperCamelCase__=32 , UpperCamelCase__=True , ):
A__ : Optional[Any] = parent
A__ : List[str] = batch_size
A__ : Any = num_channels
A__ : Any = image_size
A__ : Tuple = min_resolution
A__ : Optional[int] = max_resolution
A__ : int = do_resize
A__ : Union[str, Any] = size_divisor
A__ : str = do_rescale
def __snake_case ( self ):
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
@require_torch
@require_vision
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_, unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = GLPNImageProcessor if is_vision_available() else None
def __snake_case ( self ):
A__ : Optional[Any] = GLPNImageProcessingTester(self )
@property
def __snake_case ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def __snake_case ( self ):
A__ : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase__ , '''do_resize''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''size_divisor''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''resample''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''do_rescale''' ) )
def __snake_case ( self ):
pass
def __snake_case ( self ):
# Initialize image_processing
A__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , Image.Image )
# Test not batched input (GLPNImageProcessor doesn't support batching)
A__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def __snake_case ( self ):
# Initialize image_processing
A__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A__ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , numpify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , np.ndarray )
# Test not batched input (GLPNImageProcessor doesn't support batching)
A__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def __snake_case ( self ):
# Initialize image_processing
A__ : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A__ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , torchify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , torch.Tensor )
# Test not batched input (GLPNImageProcessor doesn't support batching)
A__ : Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
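# A minimal sketch of the invariant the three tests above assert: after preprocessing,
# height and width are rounded down to the nearest multiple of `size_divisor`. This
# mirrors the checks, not GLPNImageProcessor's exact implementation.
def divisible_size(height: int, width: int, size_divisor: int = 32) -> tuple:
    return (height // size_divisor * size_divisor, width // size_divisor * size_divisor)

assert divisible_size(333, 500) == (320, 480)
assert all(side % 32 == 0 for side in divisible_size(333, 500))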
| 55
|
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_SCREAMING_SNAKE_CASE : Union[str, Any] = '\\n@inproceedings{snover-etal-2006-study,\n title = "A Study of Translation Edit Rate with Targeted Human Annotation",\n author = "Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John",\n booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",\n month = aug # " 8-12",\n year = "2006",\n address = "Cambridge, Massachusetts, USA",\n publisher = "Association for Machine Translation in the Americas",\n url = "https://aclanthology.org/2006.amta-papers.25",\n pages = "223--231",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_SCREAMING_SNAKE_CASE : Tuple = '\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n'
_SCREAMING_SNAKE_CASE : Optional[Any] = '\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n    predictions (list of str): The system stream (a sequence of segments).\n    references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n    normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n    ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n    support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n                                    as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n                                    Only applies if `normalized = True`. Defaults to `False`.\n    case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n    \'score\' (float): TER score (num_edits / sum_ref_lengths * 100)\n    \'num_edits\' (int): The cumulative number of edits\n    \'ref_length\' (float): The cumulative average reference length\n\nExamples:\n    Example 1:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?",\n        ...                     "What did the TER metric user say to the developer?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n        ...             ["Your jokes are...", "...TERrible"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}\n\n    Example 2:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}\n\n    Example 3:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         normalized=True,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}\n\n    Example 4:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         ignore_punct=True,\n        ...                         case_sensitive=False)\n        >>> print(results)\n        {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}\n\n    Example 5:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?",\n        ...                     "What did the TER metric user say to the developer?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n        ...             ["Your jokes are...", "...TERrible"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         ignore_punct=True,\n        ...                         case_sensitive=False)\n        >>> print(results)\n        {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class UpperCamelCase__ ( datasets.Metric ):
'''simple docstring'''
def __snake_case ( self ):
if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install "sacrebleu>=1.4.12"`.''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''http://www.cs.umd.edu/~snover/tercom/''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#ter'''] , reference_urls=[
'''https://github.com/jhclark/tercom''',
] , )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = False , UpperCamelCase__ = False , UpperCamelCase__ = False , UpperCamelCase__ = False , ):
A__ : List[Any] = len(references[0] )
        if any(len(refs ) != references_per_prediction for refs in references ):
            raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
        A__ : Dict = [[refs[i] for refs in references] for i in range(references_per_prediction )]
A__ : Optional[Any] = TER(
normalized=UpperCamelCase__ , no_punct=UpperCamelCase__ , asian_support=UpperCamelCase__ , case_sensitive=UpperCamelCase__ , )
A__ : str = sb_ter.corpus_score(UpperCamelCase__ , UpperCamelCase__ )
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
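# The description above notes that references are supplied per prediction and transposed
# internally into the per-reference-set layout sacrebleu expects; a minimal sketch of
# that transposition, mirroring the list comprehension in compute:
references = [["ref A1", "ref A2"], ["ref B1", "ref B2"]]  # one inner list per prediction
transformed = [[refs[i] for refs in references] for i in range(len(references[0]))]
assert transformed == [["ref A1", "ref B1"], ["ref A2", "ref B2"]]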
| 55
| 1
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : int = {
'SenseTime/deformable-detr': 'https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = "deformable_detr"
_lowerCAmelCase = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self , UpperCamelCase__=True , UpperCamelCase__=None , UpperCamelCase__=3 , UpperCamelCase__=300 , UpperCamelCase__=1024 , UpperCamelCase__=6 , UpperCamelCase__=1024 , UpperCamelCase__=8 , UpperCamelCase__=6 , UpperCamelCase__=1024 , UpperCamelCase__=8 , UpperCamelCase__=0.0 , UpperCamelCase__=True , UpperCamelCase__="relu" , UpperCamelCase__=256 , UpperCamelCase__=0.1 , UpperCamelCase__=0.0 , UpperCamelCase__=0.0 , UpperCamelCase__=0.0_2 , UpperCamelCase__=1.0 , UpperCamelCase__=True , UpperCamelCase__=False , UpperCamelCase__="sine" , UpperCamelCase__="resnet50" , UpperCamelCase__=True , UpperCamelCase__=False , UpperCamelCase__=4 , UpperCamelCase__=4 , UpperCamelCase__=4 , UpperCamelCase__=False , UpperCamelCase__=300 , UpperCamelCase__=False , UpperCamelCase__=1 , UpperCamelCase__=5 , UpperCamelCase__=2 , UpperCamelCase__=1 , UpperCamelCase__=1 , UpperCamelCase__=5 , UpperCamelCase__=2 , UpperCamelCase__=0.1 , UpperCamelCase__=0.2_5 , UpperCamelCase__=False , **UpperCamelCase__ , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
A__ : int = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A__ : Union[str, Any] = backbone_config.get('''model_type''' )
A__ : Union[str, Any] = CONFIG_MAPPING[backbone_model_type]
A__ : Optional[int] = config_class.from_dict(UpperCamelCase__ )
A__ : Tuple = use_timm_backbone
A__ : int = backbone_config
A__ : List[Any] = num_channels
A__ : List[Any] = num_queries
A__ : str = max_position_embeddings
A__ : Tuple = d_model
A__ : int = encoder_ffn_dim
A__ : Union[str, Any] = encoder_layers
A__ : Optional[Any] = encoder_attention_heads
A__ : List[Any] = decoder_ffn_dim
A__ : Tuple = decoder_layers
A__ : Optional[Any] = decoder_attention_heads
A__ : List[str] = dropout
A__ : str = attention_dropout
A__ : List[Any] = activation_dropout
A__ : Any = activation_function
A__ : Optional[Any] = init_std
A__ : Union[str, Any] = init_xavier_std
A__ : Union[str, Any] = encoder_layerdrop
A__ : Optional[int] = auxiliary_loss
A__ : str = position_embedding_type
A__ : List[Any] = backbone
A__ : Optional[Any] = use_pretrained_backbone
A__ : Any = dilation
# deformable attributes
A__ : List[Any] = num_feature_levels
A__ : List[str] = encoder_n_points
A__ : int = decoder_n_points
A__ : List[Any] = two_stage
A__ : Dict = two_stage_num_proposals
A__ : Optional[int] = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError('''If two_stage is True, with_box_refine must be True.''' )
# Hungarian matcher
A__ : List[str] = class_cost
A__ : List[Any] = bbox_cost
A__ : Any = giou_cost
# Loss coefficients
A__ : List[str] = mask_loss_coefficient
A__ : Union[str, Any] = dice_loss_coefficient
A__ : List[Any] = bbox_loss_coefficient
A__ : Tuple = giou_loss_coefficient
A__ : Optional[Any] = eos_coefficient
A__ : List[Any] = focal_alpha
A__ : List[str] = disable_custom_kernels
super().__init__(is_encoder_decoder=UpperCamelCase__ , **UpperCamelCase__ )
@property
def __snake_case ( self ):
return self.encoder_attention_heads
@property
def __snake_case ( self ):
return self.d_model
def __snake_case ( self ):
A__ : List[str] = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
A__ : Tuple = self.backbone_config.to_dict()
A__ : Optional[int] = self.__class__.model_type
return output
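# A minimal sketch of what the `attribute_map` above buys you, assuming the class is
# transformers' DeformableDetrConfig: the BERT-style names resolve to the DETR-style
# attributes without storing anything twice.
from transformers import DeformableDetrConfig

config = DeformableDetrConfig(d_model=256, encoder_attention_heads=8)
assert config.hidden_size == config.d_model == 256
assert config.num_attention_heads == config.encoder_attention_heads == 8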
| 55
|
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
# TODO Update this
_SCREAMING_SNAKE_CASE : Optional[int] = {
'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = "esm"
def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=768 , UpperCamelCase__=12 , UpperCamelCase__=12 , UpperCamelCase__=3072 , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=1026 , UpperCamelCase__=0.0_2 , UpperCamelCase__=1e-12 , UpperCamelCase__="absolute" , UpperCamelCase__=True , UpperCamelCase__=None , UpperCamelCase__=False , UpperCamelCase__=False , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ , ):
super().__init__(pad_token_id=UpperCamelCase__ , mask_token_id=UpperCamelCase__ , **UpperCamelCase__ )
A__ : Optional[Any] = vocab_size
A__ : int = hidden_size
A__ : List[str] = num_hidden_layers
A__ : Tuple = num_attention_heads
A__ : str = intermediate_size
A__ : List[str] = hidden_dropout_prob
A__ : Optional[Any] = attention_probs_dropout_prob
A__ : int = max_position_embeddings
A__ : List[str] = initializer_range
A__ : List[Any] = layer_norm_eps
A__ : int = position_embedding_type
A__ : Optional[Any] = use_cache
A__ : Optional[int] = emb_layer_norm_before
A__ : List[str] = token_dropout
A__ : Tuple = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info('''No esmfold_config supplied for folding model, using default values.''' )
A__ : List[Any] = EsmFoldConfig()
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A__ : Optional[int] = EsmFoldConfig(**UpperCamelCase__ )
A__ : int = esmfold_config
if vocab_list is None:
logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' )
A__ : Any = get_default_vocab_list()
else:
A__ : Dict = vocab_list
else:
A__ : Optional[Any] = None
A__ : Tuple = None
if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , UpperCamelCase__ ):
raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' )
def __snake_case ( self ):
A__ : Optional[int] = super().to_dict()
if isinstance(self.esmfold_config , UpperCamelCase__ ):
A__ : Dict = self.esmfold_config.to_dict()
return output
@dataclass
class UpperCamelCase__ :
'''simple docstring'''
_lowerCAmelCase = None
_lowerCAmelCase = True
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = 0
_lowerCAmelCase = True
_lowerCAmelCase = False
_lowerCAmelCase = 128
_lowerCAmelCase = None
def __snake_case ( self ):
if self.trunk is None:
A__ : Tuple = TrunkConfig()
elif isinstance(self.trunk , UpperCamelCase__ ):
A__ : List[Any] = TrunkConfig(**self.trunk )
def __snake_case ( self ):
A__ : Optional[int] = asdict(self )
A__ : int = self.trunk.to_dict()
return output
@dataclass
class UpperCamelCase__ :
'''simple docstring'''
_lowerCAmelCase = 48
_lowerCAmelCase = 1_024
_lowerCAmelCase = 128
_lowerCAmelCase = 32
_lowerCAmelCase = 32
_lowerCAmelCase = 32
_lowerCAmelCase = 0
_lowerCAmelCase = 0
_lowerCAmelCase = False
_lowerCAmelCase = 4
_lowerCAmelCase = 128
_lowerCAmelCase = None
def __snake_case ( self ):
if self.structure_module is None:
A__ : str = StructureModuleConfig()
elif isinstance(self.structure_module , UpperCamelCase__ ):
A__ : str = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(F"`max_recycles` should be positive, got {self.max_recycles}." )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                '''`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'''
                F" {self.sequence_state_dim} and {self.sequence_head_width}." )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                '''`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'''
                F" {self.pairwise_state_dim} and {self.pairwise_head_width}." )
A__ : Tuple = self.sequence_state_dim // self.sequence_head_width
A__ : int = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
'''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got'''
F" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}." )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
'''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got'''
F" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}." )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}." )
if self.dropout >= 0.4:
raise ValueError(F"`dropout` should not be greater than 0.4, got {self.dropout}." )
def __snake_case ( self ):
A__ : List[Any] = asdict(self )
A__ : Optional[int] = self.structure_module.to_dict()
return output
@dataclass
class UpperCamelCase__ :
'''simple docstring'''
_lowerCAmelCase = 384
_lowerCAmelCase = 128
_lowerCAmelCase = 16
_lowerCAmelCase = 128
_lowerCAmelCase = 12
_lowerCAmelCase = 4
_lowerCAmelCase = 8
_lowerCAmelCase = 0.1
_lowerCAmelCase = 8
_lowerCAmelCase = 1
_lowerCAmelCase = 2
_lowerCAmelCase = 7
_lowerCAmelCase = 10
_lowerCAmelCase = 1e-8
_lowerCAmelCase = 1e5
def __snake_case ( self ):
return asdict(self )
def SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
"""simple docstring"""
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
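# A minimal sketch of the invariant the trunk-config validation above enforces: each
# state dimension must factor exactly into num_heads * head_width.
sequence_state_dim, sequence_head_width = 1024, 32
assert sequence_state_dim % sequence_head_width == 0
sequence_num_heads = sequence_state_dim // sequence_head_width
assert sequence_state_dim == sequence_num_heads * sequence_head_width  # 1024 == 32 * 32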
| 55
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : str = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = "megatron-bert"
def __init__( self , UpperCamelCase__=2_9056 , UpperCamelCase__=1024 , UpperCamelCase__=24 , UpperCamelCase__=16 , UpperCamelCase__=4096 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=512 , UpperCamelCase__=2 , UpperCamelCase__=0.0_2 , UpperCamelCase__=1e-12 , UpperCamelCase__=0 , UpperCamelCase__="absolute" , UpperCamelCase__=True , **UpperCamelCase__ , ):
super().__init__(pad_token_id=UpperCamelCase__ , **UpperCamelCase__ )
A__ : List[Any] = vocab_size
A__ : Tuple = hidden_size
A__ : Union[str, Any] = num_hidden_layers
A__ : str = num_attention_heads
A__ : Dict = hidden_act
A__ : Dict = intermediate_size
A__ : str = hidden_dropout_prob
A__ : Tuple = attention_probs_dropout_prob
A__ : List[Any] = max_position_embeddings
A__ : Tuple = type_vocab_size
A__ : List[str] = initializer_range
A__ : List[Any] = layer_norm_eps
A__ : Optional[int] = position_embedding_type
A__ : str = use_cache
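# A minimal usage sketch, assuming the class above is transformers' MegatronBertConfig:
# the defaults describe the 345M-parameter Megatron-BERT geometry.
from transformers import MegatronBertConfig

config = MegatronBertConfig()
assert (config.num_hidden_layers, config.hidden_size, config.num_attention_heads) == (24, 1024, 16)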
| 55
|
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=1024 , UpperCamelCase__=1024 , UpperCamelCase__=3.6 ):
A__ : str = tokenizer
A__ : int = tokenizer.bos_token_id
A__ : List[Any] = dataset
A__ : Tuple = seq_length
A__ : Any = seq_length * chars_per_token * num_of_sequences
def __iter__( self ):
A__ : Dict = iter(self.dataset )
A__ : Tuple = True
while more_examples:
A__ , A__ : Optional[Any] = [], 0
while True:
if buffer_len >= self.input_characters:
break
try:
buffer.append(next(UpperCamelCase__ )['''content'''] )
buffer_len += len(buffer[-1] )
except StopIteration:
A__ : Dict = False
break
A__ : str = tokenizer(UpperCamelCase__ , truncation=UpperCamelCase__ )['''input_ids''']
A__ : Optional[int] = []
for tokenized_input in tokenized_inputs:
all_token_ids.extend(tokenized_input + [self.concat_token_id] )
for i in range(0 , len(UpperCamelCase__ ) , self.seq_length ):
A__ : Optional[int] = all_token_ids[i : i + self.seq_length]
if len(UpperCamelCase__ ) == self.seq_length:
yield torch.tensor(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] ) -> Any:
"""simple docstring"""
A__ : Any = {'''streaming''': True}
A__ : List[str] = load_dataset(args.dataset_name , split='''train''' , **__UpperCamelCase )
A__ : List[str] = ConstantLengthDataset(__UpperCamelCase , __UpperCamelCase , seq_length=args.seq_length )
A__ : int = DataLoader(__UpperCamelCase , batch_size=args.batch_size )
return eval_dataloader
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[str] ) -> Dict:
"""simple docstring"""
model.eval()
A__ : Dict = []
for step, batch in enumerate(__UpperCamelCase ):
with torch.no_grad():
A__ : Any = model(__UpperCamelCase , labels=__UpperCamelCase )
A__ : Tuple = outputs.loss.repeat(args.batch_size )
losses.append(accelerator.gather(__UpperCamelCase ) )
if args.max_eval_steps > 0 and step >= args.max_eval_steps:
break
A__ : Tuple = torch.mean(torch.cat(__UpperCamelCase ) )
try:
A__ : Optional[Any] = torch.exp(__UpperCamelCase )
except OverflowError:
A__ : Union[str, Any] = float('''inf''' )
return loss.item(), perplexity.item()
# Setup Accelerator
_SCREAMING_SNAKE_CASE : List[Any] = Accelerator()
# Parse configuration
_SCREAMING_SNAKE_CASE : Optional[int] = HfArgumentParser(EvaluationArguments)
_SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args()
set_seed(args.seed)
# Logging
_SCREAMING_SNAKE_CASE : Dict = logging.getLogger(__name__)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
# Load model and tokenizer
_SCREAMING_SNAKE_CASE : Optional[int] = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
_SCREAMING_SNAKE_CASE : List[str] = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
_SCREAMING_SNAKE_CASE : Optional[Any] = create_dataloader(args)
# Prepare everything with our `accelerator`.
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[Any] = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info('Evaluating and saving model after training')
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[int] = evaluate(args)
logger.info(f"""loss/eval: {eval_loss}, perplexity: {perplexity}""")
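# A minimal sketch of the loss-to-perplexity step used by `evaluate` above:
# perplexity is exp(mean loss), with overflow mapped to infinity.
import math

def perplexity_from_mean_loss(mean_loss: float) -> float:
    try:
        return math.exp(mean_loss)
    except OverflowError:
        return float("inf")

assert round(perplexity_from_mean_loss(2.0), 2) == 7.39
assert perplexity_from_mean_loss(1e6) == float("inf")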
| 55
| 1
|
from PIL import Image
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Image , __UpperCamelCase : float ) -> Image:
"""simple docstring"""
def brightness(__UpperCamelCase : int ) -> float:
        return 1_28 + level + (__UpperCamelCase - 1_28)
if not -2_5_5.0 <= level <= 2_5_5.0:
raise ValueError('''level must be between -255.0 (black) and 255.0 (white)''' )
return img.point(__UpperCamelCase )
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change brightness to 100
        _SCREAMING_SNAKE_CASE : Dict = SCREAMING_SNAKE_CASE(img, 1_0_0)
        _SCREAMING_SNAKE_CASE.save('image_data/lena_brightness.png', format='png')
| 55
|
def SCREAMING_SNAKE_CASE ( ) -> str:
    """simple docstring"""
    A__ : Optional[Any] = 0
    for i in range(1 , 10_01 ):
        total += i**i
    return str(total )[-10:]
if __name__ == "__main__":
print(solution())
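# The same last-ten-digits answer can be computed without building the full
# 3000-digit integer, by reducing every term modulo 10**10; a minimal alternative sketch:
MOD = 10**10
last_ten = sum(pow(i, i, MOD) for i in range(1, 1001)) % MOD
print(str(last_ten).zfill(10))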
| 55
| 1
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self ):
A__ : Optional[Any] = tempfile.mkdtemp()
A__ : Union[str, Any] = BlipImageProcessor()
A__ : Union[str, Any] = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''' )
A__ : Any = BlipaProcessor(UpperCamelCase__ , UpperCamelCase__ )
processor.save_pretrained(self.tmpdirname )
def __snake_case ( self , **UpperCamelCase__ ):
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase__ ).tokenizer
def __snake_case ( self , **UpperCamelCase__ ):
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase__ ).image_processor
def __snake_case ( self ):
shutil.rmtree(self.tmpdirname )
def __snake_case ( self ):
A__ : int = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
A__ : Dict = [Image.fromarray(np.moveaxis(UpperCamelCase__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __snake_case ( self ):
A__ : Any = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A__ : Union[str, Any] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
A__ : Any = self.get_image_processor(do_normalize=UpperCamelCase__ , padding_value=1.0 )
A__ : int = BlipaProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=UpperCamelCase__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCamelCase__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase__ )
def __snake_case ( self ):
A__ : Any = self.get_image_processor()
A__ : List[str] = self.get_tokenizer()
A__ : List[Any] = BlipaProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
A__ : Any = self.prepare_image_inputs()
A__ : Dict = image_processor(UpperCamelCase__ , return_tensors='''np''' )
A__ : Tuple = processor(images=UpperCamelCase__ , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __snake_case ( self ):
A__ : str = self.get_image_processor()
A__ : Tuple = self.get_tokenizer()
A__ : Any = BlipaProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
A__ : List[Any] = '''lower newer'''
A__ : Tuple = processor(text=UpperCamelCase__ )
A__ : Union[str, Any] = tokenizer(UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __snake_case ( self ):
A__ : Any = self.get_image_processor()
A__ : int = self.get_tokenizer()
A__ : List[str] = BlipaProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
A__ : List[Any] = '''lower newer'''
A__ : Tuple = self.prepare_image_inputs()
A__ : Optional[int] = processor(text=UpperCamelCase__ , images=UpperCamelCase__ )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase__ ):
processor()
def __snake_case ( self ):
A__ : Union[str, Any] = self.get_image_processor()
A__ : Optional[Any] = self.get_tokenizer()
A__ : Tuple = BlipaProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
A__ : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A__ : Union[str, Any] = processor.batch_decode(UpperCamelCase__ )
A__ : Dict = tokenizer.batch_decode(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
A__ : Tuple = self.get_image_processor()
A__ : List[Any] = self.get_tokenizer()
A__ : List[Any] = BlipaProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
A__ : str = '''lower newer'''
A__ : List[str] = self.prepare_image_inputs()
A__ : List[Any] = processor(text=UpperCamelCase__ , images=UpperCamelCase__ )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
| 55
|
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self ):
A__ : Dict = inspect.getfile(accelerate.test_utils )
A__ : Any = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps''', '''test_metrics.py'''] )
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
A__ : Tuple = test_metrics
@require_cpu
def __snake_case ( self ):
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
def __snake_case ( self ):
debug_launcher(self.test_metrics.main )
@require_single_gpu
def __snake_case ( self ):
self.test_metrics.main()
@require_multi_gpu
def __snake_case ( self ):
print(F"Found {torch.cuda.device_count()} devices." )
A__ : int = ['''torchrun''', F"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(UpperCamelCase__ , env=os.environ.copy() )
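# A minimal sketch of the launch pattern in the multi-GPU test above: spawn one process
# per GPU with torchrun while pinning OMP threads. `test_script.py` is a placeholder
# name, and execute_subprocess_async is accelerate's async test helper.
import os
import subprocess

import torch

cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", "test_script.py"]
env = {**os.environ, "OMP_NUM_THREADS": "1"}
# subprocess.run(cmd, env=env, check=True)  # commented out: needs GPUs and the script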
| 55
| 1
|
_SCREAMING_SNAKE_CASE : Union[str, Any] = {str(digit): digit**5 for digit in range(1_0)}
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> int:
"""simple docstring"""
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(__UpperCamelCase ) )
def SCREAMING_SNAKE_CASE ( ) -> int:
"""simple docstring"""
return sum(
number
for number in range(10_00 , 1_00_00_00 )
        if number == digits_fifth_powers_sum(number ) )
if __name__ == "__main__":
print(solution())
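# A quick bound check motivating the 1_00_00_00 search ceiling above: a d-digit number
# is at least 10**(d - 1), while its digit-fifth-power sum is at most d * 9**5.
assert 6 * 9**5 == 354_294      # the largest possible sum for a six-digit number
assert 7 * 9**5 < 10**6         # so no seven-digit number can ever equal its digit sum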
| 55
|
from numpy import exp, pi, sqrt
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : float , __UpperCamelCase : float = 0.0 , __UpperCamelCase : float = 1.0 ) -> float:
"""simple docstring"""
return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
if __name__ == "__main__":
import doctest
doctest.testmod()
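# A minimal sanity check of the density above: the standard normal peaks at
# 1 / sqrt(2 * pi) when x = mu, and its value at one sigma is exp(-1/2) times the peak.
from math import exp, isclose, pi, sqrt

peak = 1 / sqrt(2 * pi)
assert isclose(peak, 0.3989422804014327)
assert isclose(peak * exp(-0.5), 0.24197072451914337)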
| 55
| 1
|
from collections import deque
from math import floor
from random import random
from time import time
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self ):
A__ : int = {}
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=1 ):
if self.graph.get(UpperCamelCase__ ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
A__ : Dict = [[w, v]]
if not self.graph.get(UpperCamelCase__ ):
A__ : str = []
def __snake_case ( self ):
return list(self.graph )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ ):
if self.graph.get(UpperCamelCase__ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(UpperCamelCase__ )
def __snake_case ( self , UpperCamelCase__=-2 , UpperCamelCase__=-1 ):
if s == d:
return []
A__ : Union[str, Any] = []
A__ : int = []
if s == -2:
A__ : Union[str, Any] = list(self.graph )[0]
stack.append(UpperCamelCase__ )
visited.append(UpperCamelCase__ )
A__ : str = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A__ : List[Any] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(UpperCamelCase__ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
A__ : Dict = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(UpperCamelCase__ ) != 0:
A__ : Optional[int] = stack[len(UpperCamelCase__ ) - 1]
else:
A__ : List[str] = ss
            # check if we have reached the starting point
if len(UpperCamelCase__ ) == 0:
return visited
def __snake_case ( self , UpperCamelCase__=-1 ):
if c == -1:
A__ : List[str] = floor(random() * 1_0000 ) + 10
for i in range(UpperCamelCase__ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
A__ : int = floor(random() * c ) + 1
if n != i:
self.add_pair(UpperCamelCase__ , UpperCamelCase__ , 1 )
def __snake_case ( self , UpperCamelCase__=-2 ):
A__ : int = deque()
A__ : Tuple = []
if s == -2:
A__ : int = list(self.graph )[0]
d.append(UpperCamelCase__ )
visited.append(UpperCamelCase__ )
while d:
A__ : Dict = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def __snake_case ( self , UpperCamelCase__ ):
A__ : int = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def __snake_case ( self , UpperCamelCase__ ):
return len(self.graph[u] )
def __snake_case ( self , UpperCamelCase__=-2 ):
A__ : Optional[Any] = []
A__ : str = []
if s == -2:
A__ : str = list(self.graph )[0]
stack.append(UpperCamelCase__ )
visited.append(UpperCamelCase__ )
A__ : Union[str, Any] = s
A__ : Tuple = []
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A__ : Any = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A__ : Optional[Any] = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(UpperCamelCase__ ) != 0:
A__ : Optional[Any] = stack[len(UpperCamelCase__ ) - 1]
else:
A__ : Dict = ss
            # check if we have reached the starting point
if len(UpperCamelCase__ ) == 0:
return sorted_nodes
def __snake_case ( self ):
A__ : int = []
A__ : int = []
A__ : Any = list(self.graph )[0]
stack.append(UpperCamelCase__ )
visited.append(UpperCamelCase__ )
A__ : Any = -2
A__ : Dict = []
A__ : Optional[Any] = s
A__ : Optional[Any] = False
A__ : str = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A__ : Dict = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A__ : str = len(UpperCamelCase__ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A__ : List[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A__ : List[Any] = True
if len(UpperCamelCase__ ) != 0:
A__ : List[str] = stack[len(UpperCamelCase__ ) - 1]
else:
A__ : Any = False
indirect_parents.append(UpperCamelCase__ )
A__ : Tuple = s
A__ : Optional[int] = ss
            # check if we have reached the starting point
if len(UpperCamelCase__ ) == 0:
return list(UpperCamelCase__ )
def __snake_case ( self ):
A__ : List[str] = []
A__ : Dict = []
A__ : Optional[int] = list(self.graph )[0]
stack.append(UpperCamelCase__ )
visited.append(UpperCamelCase__ )
A__ : Any = -2
A__ : Tuple = []
A__ : List[str] = s
A__ : Union[str, Any] = False
A__ : int = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A__ : Dict = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A__ : Tuple = len(UpperCamelCase__ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A__ : Tuple = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A__ : Optional[Any] = True
if len(UpperCamelCase__ ) != 0:
A__ : Union[str, Any] = stack[len(UpperCamelCase__ ) - 1]
else:
A__ : Dict = False
indirect_parents.append(UpperCamelCase__ )
A__ : Union[str, Any] = s
A__ : str = ss
            # check if we have reached the starting point
if len(UpperCamelCase__ ) == 0:
return False
def __snake_case ( self , UpperCamelCase__=-2 , UpperCamelCase__=-1 ):
A__ : int = time()
self.dfs(UpperCamelCase__ , UpperCamelCase__ )
A__ : Optional[int] = time()
return end - begin
def __snake_case ( self , UpperCamelCase__=-2 ):
A__ : List[str] = time()
self.bfs(UpperCamelCase__ )
A__ : List[str] = time()
return end - begin
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self ):
A__ : int = {}
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=1 ):
# check if the u exists
if self.graph.get(UpperCamelCase__ ):
            # if there already is an edge
            if self.graph[u].count([w, v] ) == 0:
                self.graph[u].append([w, v] )
        else:
            # if u does not exist
            A__ : Union[str, Any] = [[w, v]]
        # add the other way
        if self.graph.get(UpperCamelCase__ ):
            # if there already is an edge
            if self.graph[v].count([w, u] ) == 0:
                self.graph[v].append([w, u] )
        else:
            # if v does not exist
A__ : Any = [[w, u]]
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ ):
if self.graph.get(UpperCamelCase__ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(UpperCamelCase__ )
# the other way round
if self.graph.get(UpperCamelCase__ ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(UpperCamelCase__ )
def __snake_case ( self , UpperCamelCase__=-2 , UpperCamelCase__=-1 ):
if s == d:
return []
A__ : Optional[Any] = []
A__ : int = []
if s == -2:
A__ : List[Any] = list(self.graph )[0]
stack.append(UpperCamelCase__ )
visited.append(UpperCamelCase__ )
A__ : Tuple = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A__ : List[Any] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(UpperCamelCase__ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
A__ : int = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(UpperCamelCase__ ) != 0:
A__ : List[str] = stack[len(UpperCamelCase__ ) - 1]
else:
A__ : str = ss
            # check if we have reached the starting point
if len(UpperCamelCase__ ) == 0:
return visited
def __snake_case ( self , UpperCamelCase__=-1 ):
if c == -1:
A__ : Dict = floor(random() * 1_0000 ) + 10
for i in range(UpperCamelCase__ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
A__ : List[str] = floor(random() * c ) + 1
if n != i:
self.add_pair(UpperCamelCase__ , UpperCamelCase__ , 1 )
def __snake_case ( self , UpperCamelCase__=-2 ):
A__ : List[str] = deque()
A__ : str = []
if s == -2:
A__ : str = list(self.graph )[0]
d.append(UpperCamelCase__ )
visited.append(UpperCamelCase__ )
while d:
A__ : Dict = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def __snake_case ( self , UpperCamelCase__ ):
return len(self.graph[u] )
def __snake_case ( self ):
A__ : List[str] = []
A__ : Optional[int] = []
A__ : Dict = list(self.graph )[0]
stack.append(UpperCamelCase__ )
visited.append(UpperCamelCase__ )
A__ : List[Any] = -2
A__ : Dict = []
A__ : str = s
A__ : str = False
A__ : Tuple = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A__ : List[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A__ : Optional[int] = len(UpperCamelCase__ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A__ : Optional[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A__ : str = True
if len(UpperCamelCase__ ) != 0:
A__ : str = stack[len(UpperCamelCase__ ) - 1]
else:
A__ : Optional[Any] = False
indirect_parents.append(UpperCamelCase__ )
A__ : Optional[int] = s
A__ : Optional[Any] = ss
            # check if we have reached the starting point
if len(UpperCamelCase__ ) == 0:
return list(UpperCamelCase__ )
def __snake_case ( self ):
A__ : Any = []
A__ : Any = []
A__ : Optional[int] = list(self.graph )[0]
stack.append(UpperCamelCase__ )
visited.append(UpperCamelCase__ )
A__ : List[Any] = -2
A__ : List[str] = []
A__ : Optional[Any] = s
A__ : Optional[Any] = False
A__ : Optional[int] = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
A__ : Dict = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A__ : Any = len(UpperCamelCase__ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A__ : Union[str, Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A__ : Tuple = True
if len(UpperCamelCase__ ) != 0:
A__ : List[Any] = stack[len(UpperCamelCase__ ) - 1]
else:
A__ : Union[str, Any] = False
indirect_parents.append(UpperCamelCase__ )
A__ : Union[str, Any] = s
A__ : Union[str, Any] = ss
            # check if we have reached the starting point
if len(UpperCamelCase__ ) == 0:
return False
def __snake_case ( self ):
return list(self.graph )
def __snake_case ( self , UpperCamelCase__=-2 , UpperCamelCase__=-1 ):
A__ : List[str] = time()
self.dfs(UpperCamelCase__ , UpperCamelCase__ )
A__ : Any = time()
return end - begin
def __snake_case ( self , UpperCamelCase__=-2 ):
A__ : Union[str, Any] = time()
self.bfs(UpperCamelCase__ )
A__ : Optional[int] = time()
return end - begin
| 55
|
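A minimal, de-obfuscated sketch of the iterative depth-first search used above, written against a plain adjacency-list dict (the names here are illustrative, not taken from the snippet):

def dfs_path(graph: dict, start, goal) -> list:
    # Explicit stack replaces recursion; `visited` doubles as the visit order.
    stack = [start]
    visited = [start]
    while stack:
        node = stack[-1]
        for neighbour in graph.get(node, []):
            if neighbour not in visited:
                if neighbour == goal:
                    visited.append(neighbour)
                    return visited
                stack.append(neighbour)
                visited.append(neighbour)
                break
        else:
            # all children of this node are visited, so backtrack
            stack.pop()
    return visited

if __name__ == "__main__":
    g = {0: [1, 2], 1: [3], 2: [3], 3: []}
    print(dfs_path(g, 0, 3))  # [0, 1, 3]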
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE : int = {
'configuration_bert': ['BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BertConfig', 'BertOnnxConfig'],
'tokenization_bert': ['BasicTokenizer', 'BertTokenizer', 'WordpieceTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Optional[Any] = ['BertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Union[str, Any] = [
'BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BertForMaskedLM',
'BertForMultipleChoice',
'BertForNextSentencePrediction',
'BertForPreTraining',
'BertForQuestionAnswering',
'BertForSequenceClassification',
'BertForTokenClassification',
'BertLayer',
'BertLMHeadModel',
'BertModel',
'BertPreTrainedModel',
'load_tf_weights_in_bert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Tuple = [
'TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBertEmbeddings',
'TFBertForMaskedLM',
'TFBertForMultipleChoice',
'TFBertForNextSentencePrediction',
'TFBertForPreTraining',
'TFBertForQuestionAnswering',
'TFBertForSequenceClassification',
'TFBertForTokenClassification',
'TFBertLMHeadModel',
'TFBertMainLayer',
'TFBertModel',
'TFBertPreTrainedModel',
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Dict = ['TFBertTokenizer']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Optional[int] = [
'FlaxBertForCausalLM',
'FlaxBertForMaskedLM',
'FlaxBertForMultipleChoice',
'FlaxBertForNextSentencePrediction',
'FlaxBertForPreTraining',
'FlaxBertForQuestionAnswering',
'FlaxBertForSequenceClassification',
'FlaxBertForTokenClassification',
'FlaxBertModel',
'FlaxBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 55
| 1
|
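The file above relies on Transformers' lazy-import machinery: submodules listed in the import structure are only imported when one of their attributes is first accessed. A minimal sketch of the same idea using PEP 562 module-level __getattr__ (module and class names below are hypothetical, not the real _LazyModule):

import importlib

_import_structure = {
    "tokenization_demo": ["DemoTokenizer"],
    "modeling_demo": ["DemoModel"],
}
_attr_to_module = {
    attr: module for module, attrs in _import_structure.items() for attr in attrs
}

def __getattr__(name):
    # Called only when `name` is not found by normal lookup; imports on demand.
    if name in _attr_to_module:
        module = importlib.import_module("." + _attr_to_module[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")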
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Dict = {
'nielsr/canine-s': 2_0_4_8,
}
# Unicode defines 1,114,112 total “codepoints”
_SCREAMING_SNAKE_CASE : Union[str, Any] = 1_1_1_4_1_1_2
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
_SCREAMING_SNAKE_CASE : List[str] = 0
_SCREAMING_SNAKE_CASE : Optional[Any] = 0xE000
_SCREAMING_SNAKE_CASE : int = 0xE001
_SCREAMING_SNAKE_CASE : List[str] = 0xE002
_SCREAMING_SNAKE_CASE : List[str] = 0xE003
_SCREAMING_SNAKE_CASE : int = 0xE004
# Maps special codepoints to human-readable names.
_SCREAMING_SNAKE_CASE : Dict[int, str] = {
    # Special symbols are represented using codepoint values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
_SCREAMING_SNAKE_CASE : Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , UpperCamelCase__=chr(UpperCamelCase__ ) , UpperCamelCase__=chr(UpperCamelCase__ ) , UpperCamelCase__=chr(UpperCamelCase__ ) , UpperCamelCase__=chr(UpperCamelCase__ ) , UpperCamelCase__=chr(UpperCamelCase__ ) , UpperCamelCase__=chr(UpperCamelCase__ ) , UpperCamelCase__=False , UpperCamelCase__=2048 , **UpperCamelCase__ , ):
A__ : Any = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else bos_token
A__ : Union[str, Any] = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else eos_token
A__ : Dict = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else sep_token
A__ : str = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else cls_token
A__ : Optional[int] = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else pad_token
        # Mask token behaves like a normal word, i.e. include the space before it
A__ : str = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token
super().__init__(
bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , model_max_length=UpperCamelCase__ , **UpperCamelCase__ , )
# Creates a mapping for looking up the IDs of special symbols.
A__ : Dict[str, int] = {}
for codepoint, name in SPECIAL_CODEPOINTS.items():
A__ : Optional[Any] = codepoint
# Creates a mapping for looking up the string forms of special symbol IDs.
A__ : Dict[int, str] = {
codepoint: name for name, codepoint in self._special_codepoints.items()
}
A__ : Optional[int] = UNICODE_VOCAB_SIZE
A__ : Dict = len(self._special_codepoints )
@property
def __snake_case ( self ):
return self._unicode_vocab_size
def __snake_case ( self , UpperCamelCase__ ):
return list(UpperCamelCase__ )
def __snake_case ( self , UpperCamelCase__ ):
try:
return ord(UpperCamelCase__ )
except TypeError:
raise ValueError(F"invalid token: '{token}'" )
def __snake_case ( self , UpperCamelCase__ ):
try:
if index in SPECIAL_CODEPOINTS:
return SPECIAL_CODEPOINTS[index]
return chr(UpperCamelCase__ )
except TypeError:
raise ValueError(F"invalid id: {index}" )
def __snake_case ( self , UpperCamelCase__ ):
return "".join(UpperCamelCase__ )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ = None ):
A__ : Any = [self.sep_token_id]
A__ : Union[str, Any] = [self.cls_token_id]
A__ : int = cls + token_ids_a + sep
if token_ids_a is not None:
result += token_ids_a + sep
return result
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase__ , token_ids_a=UpperCamelCase__ , already_has_special_tokens=UpperCamelCase__ )
A__ : Tuple = [1] + ([0] * len(UpperCamelCase__ )) + [1]
if token_ids_a is not None:
result += ([0] * len(UpperCamelCase__ )) + [1]
return result
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ = None ):
A__ : Optional[int] = [self.sep_token_id]
A__ : Dict = [self.cls_token_id]
A__ : List[str] = len(cls + token_ids_a + sep ) * [0]
if token_ids_a is not None:
result += len(token_ids_a + sep ) * [1]
return result
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ = None ):
return ()
| 55
|
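The tokenizer above is purely character-level: ord() maps each character to its Unicode codepoint, and a handful of Private Use Area codepoints stand in for special tokens. A toy round trip that mirrors that ord/chr logic, independent of the class itself:

CLS, SEP = 0xE000, 0xE001  # Private Use Area codepoints, as in the snippet

def encode(text: str) -> list[int]:
    return [CLS] + [ord(ch) for ch in text] + [SEP]

def decode(ids: list[int]) -> str:
    special = {CLS: "[CLS]", SEP: "[SEP]"}
    return "".join(special.get(i, chr(i)) for i in ids)

assert decode(encode("héllo")) == "[CLS]héllo[SEP]"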
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
_SCREAMING_SNAKE_CASE : List[Any] = get_tests_dir('fixtures/dummy_feature_extractor_config.json')
_SCREAMING_SNAKE_CASE : int = get_tests_dir('fixtures/vocab.json')
_SCREAMING_SNAKE_CASE : Tuple = get_tests_dir('fixtures')
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
def __snake_case ( self ):
A__ : List[Any] = 0
def __snake_case ( self ):
A__ : Dict = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Optional[Any] = WavaVecaConfig()
A__ : Dict = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
# save in new folder
model_config.save_pretrained(UpperCamelCase__ )
processor.save_pretrained(UpperCamelCase__ )
A__ : Any = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(UpperCamelCase__ , os.path.join(UpperCamelCase__ , UpperCamelCase__ ) )
copyfile(UpperCamelCase__ , os.path.join(UpperCamelCase__ , '''vocab.json''' ) )
A__ : List[Any] = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Dict = WavaVecaFeatureExtractor()
A__ : Union[str, Any] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
A__ : Optional[int] = WavaVecaProcessor(UpperCamelCase__ , UpperCamelCase__ )
# save in new folder
processor.save_pretrained(UpperCamelCase__ )
# drop `processor_class` in tokenizer
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , '''r''' ) as f:
A__ : str = json.load(UpperCamelCase__ )
config_dict.pop('''processor_class''' )
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , '''w''' ) as f:
f.write(json.dumps(UpperCamelCase__ ) )
A__ : Optional[int] = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Optional[int] = WavaVecaFeatureExtractor()
A__ : List[Any] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
A__ : str = WavaVecaProcessor(UpperCamelCase__ , UpperCamelCase__ )
# save in new folder
processor.save_pretrained(UpperCamelCase__ )
# drop `processor_class` in feature extractor
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , '''r''' ) as f:
A__ : List[Any] = json.load(UpperCamelCase__ )
config_dict.pop('''processor_class''' )
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , '''w''' ) as f:
f.write(json.dumps(UpperCamelCase__ ) )
A__ : List[Any] = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Any = WavaVecaConfig(processor_class='''Wav2Vec2Processor''' )
model_config.save_pretrained(UpperCamelCase__ )
# copy relevant files
copyfile(UpperCamelCase__ , os.path.join(UpperCamelCase__ , '''vocab.json''' ) )
            # create empty sample processor
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , '''w''' ) as f:
f.write('''{}''' )
A__ : Union[str, Any] = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(UpperCamelCase__ ):
A__ : Union[str, Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCamelCase__ ):
A__ : str = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__ )
A__ : int = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__ )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
A__ : List[Any] = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
A__ : List[Any] = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
A__ : Dict = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__ , use_fast=UpperCamelCase__ )
A__ : int = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def __snake_case ( self ):
try:
AutoConfig.register('''custom''' , UpperCamelCase__ )
AutoFeatureExtractor.register(UpperCamelCase__ , UpperCamelCase__ )
AutoTokenizer.register(UpperCamelCase__ , slow_tokenizer_class=UpperCamelCase__ )
AutoProcessor.register(UpperCamelCase__ , UpperCamelCase__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCamelCase__ ):
AutoProcessor.register(UpperCamelCase__ , UpperCamelCase__ )
# Now that the config is registered, it can be used as any other config with the auto-API
A__ : Any = CustomFeatureExtractor.from_pretrained(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
A__ : str = os.path.join(UpperCamelCase__ , '''vocab.txt''' )
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
A__ : str = CustomTokenizer(UpperCamelCase__ )
A__ : Optional[Any] = CustomProcessor(UpperCamelCase__ , UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(UpperCamelCase__ )
A__ : Union[str, Any] = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def __snake_case ( self ):
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = False
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = False
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = "AutoFeatureExtractor"
_lowerCAmelCase = "AutoTokenizer"
_lowerCAmelCase = False
try:
AutoConfig.register('''custom''' , UpperCamelCase__ )
AutoFeatureExtractor.register(UpperCamelCase__ , UpperCamelCase__ )
AutoTokenizer.register(UpperCamelCase__ , slow_tokenizer_class=UpperCamelCase__ )
AutoProcessor.register(UpperCamelCase__ , UpperCamelCase__ )
# If remote code is not set, the default is to use local classes.
A__ : List[Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
A__ : Any = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__ )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
            # If remote code is enabled, we load from the Hub.
A__ : Union[str, Any] = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__ )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def __snake_case ( self ):
A__ : str = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(processor.__class__.__name__ , '''BertTokenizerFast''' )
def __snake_case ( self ):
A__ : Union[str, Any] = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-convnext''' )
self.assertEqual(processor.__class__.__name__ , '''ConvNextImageProcessor''' )
@is_staging_test
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def __snake_case ( cls ):
A__ : List[str] = TOKEN
HfFolder.save_token(UpperCamelCase__ )
@classmethod
def __snake_case ( cls ):
try:
delete_repo(token=cls._token , repo_id='''test-processor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-processor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-processor''' )
except HTTPError:
pass
def __snake_case ( self ):
A__ : Optional[Any] = WavaVecaProcessor.from_pretrained(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(UpperCamelCase__ , '''test-processor''' ) , push_to_hub=UpperCamelCase__ , use_auth_token=self._token )
A__ : List[Any] = WavaVecaProcessor.from_pretrained(F"{USER}/test-processor" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(UpperCamelCase__ , getattr(new_processor.feature_extractor , UpperCamelCase__ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def __snake_case ( self ):
A__ : int = WavaVecaProcessor.from_pretrained(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(UpperCamelCase__ , '''test-processor-org''' ) , push_to_hub=UpperCamelCase__ , use_auth_token=self._token , organization='''valid_org''' , )
A__ : List[str] = WavaVecaProcessor.from_pretrained('''valid_org/test-processor-org''' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(UpperCamelCase__ , getattr(new_processor.feature_extractor , UpperCamelCase__ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def __snake_case ( self ):
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
A__ : Optional[Any] = CustomFeatureExtractor.from_pretrained(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
A__ : List[Any] = os.path.join(UpperCamelCase__ , '''vocab.txt''' )
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
A__ : Union[str, Any] = CustomTokenizer(UpperCamelCase__ )
A__ : List[Any] = CustomProcessor(UpperCamelCase__ , UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(F"{USER}/test-dynamic-processor" , token=self._token )
A__ : Union[str, Any] = Repository(UpperCamelCase__ , clone_from=F"{USER}/test-dynamic-processor" , token=self._token )
processor.save_pretrained(UpperCamelCase__ )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor''',
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(UpperCamelCase__ , '''tokenizer_config.json''' ) ) as f:
A__ : Optional[int] = json.load(UpperCamelCase__ )
self.assertDictEqual(
tokenizer_config['''auto_map'''] , {
'''AutoTokenizer''': ['''custom_tokenization.CustomTokenizer''', None],
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(UpperCamelCase__ , '''custom_feature_extraction.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(UpperCamelCase__ , '''custom_tokenization.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(UpperCamelCase__ , '''custom_processing.py''' ) ) )
repo.push_to_hub()
A__ : Tuple = AutoProcessor.from_pretrained(F"{USER}/test-dynamic-processor" , trust_remote_code=UpperCamelCase__ )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , '''CustomProcessor''' )
| 55
| 1
|
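Condensed, the registration flow those tests exercise looks like the sketch below. The Demo* classes are illustrative stand-ins (not real Transformers classes), and only registration, not instantiation, is shown:

from transformers import AutoConfig, AutoFeatureExtractor, AutoProcessor
from transformers import PretrainedConfig, ProcessorMixin
from transformers.feature_extraction_utils import FeatureExtractionMixin

class DemoConfig(PretrainedConfig):
    model_type = "demo-custom"

class DemoFeatureExtractor(FeatureExtractionMixin):
    pass

class DemoProcessor(ProcessorMixin):
    feature_extractor_class = "AutoFeatureExtractor"
    tokenizer_class = "AutoTokenizer"

# Register the config type first, then attach the other classes to it;
# afterwards the auto classes resolve "demo-custom" like any built-in type.
AutoConfig.register("demo-custom", DemoConfig)
AutoFeatureExtractor.register(DemoConfig, DemoFeatureExtractor)
AutoProcessor.register(DemoConfig, DemoProcessor)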
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
@slow
@require_torch
def __snake_case ( self ):
A__ : Dict = EncoderDecoderModel.from_encoder_decoder_pretrained('''prajjwal1/bert-tiny''' , '''prajjwal1/bert-tiny''' )
A__ : Union[str, Any] = BertTokenizer.from_pretrained('''bert-base-uncased''' )
A__ : Optional[int] = bertabert.config.encoder.vocab_size
A__ : str = tokenizer.sep_token_id
A__ : Any = tokenizer.cls_token_id
A__ : Optional[int] = 128
A__ : int = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''train[:1%]''' )
A__ : Any = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''validation[:1%]''' )
A__ : Tuple = train_dataset.select(range(32 ) )
A__ : Any = val_dataset.select(range(16 ) )
A__ : List[Any] = 4
def _map_to_encoder_decoder_inputs(UpperCamelCase__ ):
            # BertTokenizer automatically wraps the text as [CLS] <text> [SEP]
A__ : Dict = tokenizer(batch['''article'''] , padding='''max_length''' , truncation=UpperCamelCase__ , max_length=512 )
A__ : Any = tokenizer(batch['''highlights'''] , padding='''max_length''' , truncation=UpperCamelCase__ , max_length=128 )
A__ : Optional[Any] = inputs.input_ids
A__ : List[Any] = inputs.attention_mask
A__ : Optional[Any] = outputs.input_ids
A__ : List[Any] = outputs.input_ids.copy()
A__ : Dict = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['''labels''']
]
A__ : List[Any] = outputs.attention_mask
assert all(len(UpperCamelCase__ ) == 512 for x in inputs.input_ids )
assert all(len(UpperCamelCase__ ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(UpperCamelCase__ ):
A__ : int = pred.label_ids
A__ : Dict = pred.predictions
# all unnecessary tokens are removed
A__ : List[str] = tokenizer.batch_decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
A__ : Dict = tokenizer.batch_decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
A__ : Dict = sum([int(pred_str[i] == label_str[i] ) for i in range(len(UpperCamelCase__ ) )] ) / len(UpperCamelCase__ )
return {"accuracy": accuracy}
# map train dataset
A__ : Optional[Any] = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=UpperCamelCase__ , batch_size=UpperCamelCase__ , remove_columns=['''article''', '''highlights'''] , )
train_dataset.set_format(
type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
# same for validation dataset
A__ : Optional[int] = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=UpperCamelCase__ , batch_size=UpperCamelCase__ , remove_columns=['''article''', '''highlights'''] , )
val_dataset.set_format(
type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
A__ : Any = self.get_auto_remove_tmp_dir()
A__ : Optional[Any] = SeqaSeqTrainingArguments(
output_dir=UpperCamelCase__ , per_device_train_batch_size=UpperCamelCase__ , per_device_eval_batch_size=UpperCamelCase__ , predict_with_generate=UpperCamelCase__ , evaluation_strategy='''steps''' , do_train=UpperCamelCase__ , do_eval=UpperCamelCase__ , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
A__ : List[str] = SeqaSeqTrainer(
model=UpperCamelCase__ , args=UpperCamelCase__ , compute_metrics=_compute_metrics , train_dataset=UpperCamelCase__ , eval_dataset=UpperCamelCase__ , tokenizer=UpperCamelCase__ , )
# start training
trainer.train()
| 55
|
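The key warm-start step in that test, condensed and hedged (requires transformers and Hub access): BERT has no generation conventions of its own, so [CLS] is reused as the decoder start token and [SEP] as end-of-sequence.

from transformers import BertTokenizer, EncoderDecoderModel

model = EncoderDecoderModel.from_encoder_decoder_pretrained(
    "prajjwal1/bert-tiny", "prajjwal1/bert-tiny"
)
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

# Reuse BERT's special tokens as generation conventions.
model.config.decoder_start_token_id = tokenizer.cls_token_id
model.config.eos_token_id = tokenizer.sep_token_id
model.config.pad_token_id = tokenizer.pad_token_id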
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
@staticmethod
@abstractmethod
def __snake_case ( UpperCamelCase__ ):
raise NotImplementedError()
@abstractmethod
def __snake_case ( self ):
raise NotImplementedError()
| 55
| 1
|
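A concrete subclass following that abstract interface might look like the sketch below; the method and class names are hypothetical stand-ins for the obfuscated ones:

from abc import ABC, abstractmethod
from argparse import ArgumentParser

class BaseCommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(subparsers):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()

class EchoCommand(BaseCommand):
    def __init__(self, text):
        self.text = text

    @staticmethod
    def register_subcommand(subparsers):
        # Each command wires its own subparser and a factory for itself.
        parser = subparsers.add_parser("echo")
        parser.add_argument("text")
        parser.set_defaults(factory=lambda args: EchoCommand(args.text))

    def run(self):
        print(self.text)

if __name__ == "__main__":
    cli = ArgumentParser("demo-cli")
    EchoCommand.register_subcommand(cli.add_subparsers())
    args = cli.parse_args(["echo", "hello"])
    args.factory(args).run()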
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] , __UpperCamelCase : int=False ) -> Tuple:
"""simple docstring"""
try:
A__ : Dict = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
A__ : Tuple = default
else:
# KEY is set, convert it to True or False.
try:
A__ : Union[str, Any] = strtobool(__UpperCamelCase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F"If set, {key} must be yes or no." )
return _value
_SCREAMING_SNAKE_CASE : Union[str, Any] = parse_flag_from_env('RUN_SLOW', default=False)
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] ) -> Any:
"""simple docstring"""
return unittest.skip('''Test was skipped''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Tuple ) -> Union[str, Any]:
"""simple docstring"""
return unittest.skipUnless(_run_slow_tests , '''test is slow''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : str ) -> int:
"""simple docstring"""
return unittest.skipUnless(not torch.cuda.is_available() , '''test requires only a CPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] ) -> Tuple:
"""simple docstring"""
return unittest.skipUnless(torch.cuda.is_available() , '''test requires a GPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Dict ) -> List[str]:
"""simple docstring"""
return unittest.skipUnless(is_xpu_available() , '''test requires a XPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Dict ) -> Any:
"""simple docstring"""
return unittest.skipUnless(is_mps_available() , '''test requires a `mps` backend support in `torch`''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Optional[Any]:
"""simple docstring"""
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , '''test requires the Hugging Face suite''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Tuple ) -> Tuple:
"""simple docstring"""
return unittest.skipUnless(is_bnb_available() , '''test requires the bitsandbytes library''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] ) -> List[Any]:
"""simple docstring"""
return unittest.skipUnless(is_tpu_available() , '''test requires TPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Tuple:
"""simple docstring"""
return unittest.skipUnless(torch.cuda.device_count() == 1 , '''test requires a GPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Dict:
"""simple docstring"""
return unittest.skipUnless(torch.xpu.device_count() == 1 , '''test requires a XPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any ) -> str:
"""simple docstring"""
return unittest.skipUnless(torch.cuda.device_count() > 1 , '''test requires multiple GPUs''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Any:
"""simple docstring"""
return unittest.skipUnless(torch.xpu.device_count() > 1 , '''test requires multiple XPUs''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] ) -> int:
"""simple docstring"""
return unittest.skipUnless(is_safetensors_available() , '''test requires safetensors''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] ) -> Optional[Any]:
"""simple docstring"""
return unittest.skipUnless(is_deepspeed_available() , '''test requires DeepSpeed''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any ) -> List[Any]:
"""simple docstring"""
return unittest.skipUnless(is_torch_version('''>=''' , '''1.12.0''' ) , '''test requires torch version >= 1.12.0''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[int]=None , __UpperCamelCase : List[Any]=None ) -> Optional[Any]:
"""simple docstring"""
if test_case is None:
return partial(__UpperCamelCase , version=__UpperCamelCase )
return unittest.skipUnless(is_torch_version('''>=''' , __UpperCamelCase ) , F"test requires torch version >= {version}" )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
return unittest.skipUnless(is_tensorboard_available() , '''test requires Tensorboard''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any ) -> Tuple:
"""simple docstring"""
return unittest.skipUnless(is_wandb_available() , '''test requires wandb''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Tuple ) -> Any:
"""simple docstring"""
return unittest.skipUnless(is_comet_ml_available() , '''test requires comet_ml''' )(__UpperCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
return unittest.skipUnless(
_atleast_one_tracker_available , '''test requires at least one tracker to be available and for `comet_ml` to not be installed''' , )(__UpperCamelCase )
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = True
@classmethod
def __snake_case ( cls ):
A__ : Tuple = tempfile.mkdtemp()
@classmethod
def __snake_case ( cls ):
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def __snake_case ( self ):
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob('''**/*''' ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(UpperCamelCase__ )
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self ):
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self , UpperCamelCase__ ):
A__ : Tuple = mocks if isinstance(UpperCamelCase__ , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : str ) -> Any:
"""simple docstring"""
A__ : int = AcceleratorState()
A__ : Any = tensor[None].clone().to(state.device )
A__ : Optional[int] = gather(__UpperCamelCase ).cpu()
A__ : Any = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , __UpperCamelCase ):
return False
return True
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : List[Any] = returncode
A__ : Union[str, Any] = stdout
A__ : Dict = stderr
async def SCREAMING_SNAKE_CASE ( __UpperCamelCase : str , __UpperCamelCase : Optional[Any] ) -> Any:
"""simple docstring"""
while True:
A__ : Tuple = await stream.readline()
if line:
callback(__UpperCamelCase )
else:
break
async def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[Any]=None , __UpperCamelCase : List[Any]=None , __UpperCamelCase : Tuple=None , __UpperCamelCase : Tuple=False , __UpperCamelCase : List[Any]=False ) -> _RunOutput:
"""simple docstring"""
if echo:
print('''\nRunning: ''' , ''' '''.join(__UpperCamelCase ) )
A__ : int = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=__UpperCamelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=__UpperCamelCase , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
A__ : List[Any] = []
A__ : str = []
def tee(__UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Dict , __UpperCamelCase : List[Any]="" ):
A__ : Optional[Any] = line.decode('''utf-8''' ).rstrip()
sink.append(__UpperCamelCase )
if not quiet:
print(__UpperCamelCase , __UpperCamelCase , file=__UpperCamelCase )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda __UpperCamelCase : tee(__UpperCamelCase , __UpperCamelCase , sys.stdout , label='''stdout:''' ) ) ),
asyncio.create_task(_read_stream(p.stderr , lambda __UpperCamelCase : tee(__UpperCamelCase , __UpperCamelCase , sys.stderr , label='''stderr:''' ) ) ),
] , timeout=__UpperCamelCase , )
return _RunOutput(await p.wait() , __UpperCamelCase , __UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] , __UpperCamelCase : Any=None , __UpperCamelCase : List[Any]=None , __UpperCamelCase : List[str]=1_80 , __UpperCamelCase : List[str]=False , __UpperCamelCase : Dict=True ) -> _RunOutput:
"""simple docstring"""
A__ : Dict = asyncio.get_event_loop()
A__ : Optional[Any] = loop.run_until_complete(
_stream_subprocess(__UpperCamelCase , env=__UpperCamelCase , stdin=__UpperCamelCase , timeout=__UpperCamelCase , quiet=__UpperCamelCase , echo=__UpperCamelCase ) )
A__ : Union[str, Any] = ''' '''.join(__UpperCamelCase )
if result.returncode > 0:
A__ : Optional[Any] = '''\n'''.join(result.stderr )
raise RuntimeError(
F"'{cmd_str}' failed with returncode {result.returncode}\n\n"
F"The combined stderr from workers follows:\n{stderr}" )
return result
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[str] , __UpperCamelCase : List[Any]=False ) -> Dict:
"""simple docstring"""
try:
A__ : List[Any] = subprocess.check_output(__UpperCamelCase , stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(__UpperCamelCase , '''decode''' ):
A__ : Any = output.decode('''utf-8''' )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
F"Command `{' '.join(__UpperCamelCase )}` failed with the following error:\n\n{e.output.decode()}" ) from e
| 55
|
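The env-flag pattern at the top of that file generalizes to any opt-in test gate; a minimal sketch mirroring the RUN_SLOW flag:

import os
import unittest
from distutils.util import strtobool

_run_slow = bool(strtobool(os.environ.get("RUN_SLOW", "no")))

def slow(test_case):
    # Skips unless RUN_SLOW=yes (or 1/true) is set in the environment.
    return unittest.skipUnless(_run_slow, "test is slow")(test_case)

class DemoTest(unittest.TestCase):
    @slow
    def test_expensive(self):
        self.assertTrue(True)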
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__=13 , UpperCamelCase__=[30, 30] , UpperCamelCase__=2 , UpperCamelCase__=3 , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=32 , UpperCamelCase__=5 , UpperCamelCase__=4 , UpperCamelCase__=37 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=10 , UpperCamelCase__=0.0_2 , UpperCamelCase__=3 , UpperCamelCase__=None , UpperCamelCase__=8 , UpperCamelCase__=10 , ):
A__ : Optional[int] = parent
A__ : List[Any] = batch_size
A__ : Dict = image_size
A__ : Any = patch_size
A__ : Dict = num_channels
A__ : List[Any] = is_training
A__ : int = use_labels
A__ : Any = hidden_size
A__ : List[str] = num_hidden_layers
A__ : Optional[int] = num_attention_heads
A__ : Optional[Any] = intermediate_size
A__ : str = hidden_act
A__ : str = hidden_dropout_prob
A__ : Optional[int] = attention_probs_dropout_prob
A__ : Optional[int] = type_sequence_label_size
A__ : Any = initializer_range
A__ : Optional[int] = num_labels
A__ : Union[str, Any] = scope
A__ : Union[str, Any] = n_targets
A__ : Dict = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
A__ : int = (image_size[1] // patch_size) * (image_size[0] // patch_size)
A__ : List[str] = num_patches + 1 + self.num_detection_tokens
def __snake_case ( self ):
A__ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
A__ : int = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
A__ : Tuple = []
for i in range(self.batch_size ):
A__ : List[Any] = {}
A__ : Tuple = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=UpperCamelCase__ )
A__ : Any = torch.rand(self.n_targets , 4 , device=UpperCamelCase__ )
labels.append(UpperCamelCase__ )
A__ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __snake_case ( self ):
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : Tuple = YolosModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A__ : Optional[Any] = model(UpperCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : Any = YolosForObjectDetection(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A__ : Union[str, Any] = model(pixel_values=UpperCamelCase__ )
A__ : Optional[int] = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
A__ : Union[str, Any] = model(pixel_values=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def __snake_case ( self ):
A__ : Optional[int] = self.prepare_config_and_inputs()
A__ , A__ , A__ : Optional[Any] = config_and_inputs
A__ : Optional[int] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
_lowerCAmelCase = (
{"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
)
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=False ):
A__ : Optional[int] = super()._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
A__ : str = []
for i in range(self.model_tester.batch_size ):
A__ : int = {}
A__ : Dict = torch.ones(
size=(self.model_tester.n_targets,) , device=UpperCamelCase__ , dtype=torch.long )
A__ : Dict = torch.ones(
self.model_tester.n_targets , 4 , device=UpperCamelCase__ , dtype=torch.float )
labels.append(UpperCamelCase__ )
A__ : Dict = labels
return inputs_dict
def __snake_case ( self ):
A__ : List[Any] = YolosModelTester(self )
A__ : List[str] = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 )
def __snake_case ( self ):
self.config_tester.run_common_tests()
def __snake_case ( self ):
# YOLOS does not use inputs_embeds
pass
def __snake_case ( self ):
A__ , A__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : Any = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A__ : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) )
def __snake_case ( self ):
A__ , A__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : List[str] = model_class(UpperCamelCase__ )
A__ : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ : Optional[int] = [*signature.parameters.keys()]
A__ : Optional[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def __snake_case ( self ):
A__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def __snake_case ( self ):
A__ , A__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
A__ : Tuple = True
# in YOLOS, the seq_len is different
A__ : List[Any] = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
A__ : Any = True
A__ : Optional[int] = False
A__ : Optional[Any] = True
A__ : int = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A__ : List[str] = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
A__ : Optional[int] = outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
A__ : Tuple = True
A__ : Optional[Any] = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A__ : Tuple = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
A__ : Tuple = outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
A__ : List[Any] = len(UpperCamelCase__ )
# Check attention is always last and order is fine
A__ : List[str] = True
A__ : List[Any] = True
A__ : int = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A__ : Tuple = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
A__ : Tuple = 1
self.assertEqual(out_len + added_hidden_states , len(UpperCamelCase__ ) )
A__ : List[str] = outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def __snake_case ( self ):
def check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : str = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A__ : int = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
A__ : Optional[Any] = outputs.hidden_states
A__ : int = getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
# YOLOS has a different seq_length
A__ : Union[str, Any] = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
A__ , A__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : int = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A__ : Optional[int] = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
A__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*UpperCamelCase__ )
@slow
def __snake_case ( self ):
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : Union[str, Any] = YolosModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE ( ) -> List[str]:
"""simple docstring"""
A__ : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __snake_case ( self ):
return AutoImageProcessor.from_pretrained('''hustvl/yolos-small''' ) if is_vision_available() else None
@slow
def __snake_case ( self ):
A__ : Tuple = YolosForObjectDetection.from_pretrained('''hustvl/yolos-small''' ).to(UpperCamelCase__ )
A__ : str = self.default_image_processor
A__ : Tuple = prepare_img()
A__ : Tuple = image_processor(images=UpperCamelCase__ , return_tensors='''pt''' ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
A__ : Any = model(inputs.pixel_values )
# verify outputs
A__ : List[Any] = torch.Size((1, 100, 92) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
A__ : Optional[int] = torch.tensor(
[[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] , device=UpperCamelCase__ , )
A__ : Optional[int] = torch.tensor(
[[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] , device=UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , UpperCamelCase__ , atol=1e-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , UpperCamelCase__ , atol=1e-4 ) )
# verify postprocessing
A__ : Dict = image_processor.post_process_object_detection(
UpperCamelCase__ , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
A__ : int = torch.tensor([0.9_9_9_4, 0.9_7_9_0, 0.9_9_6_4, 0.9_9_7_2, 0.9_8_6_1] ).to(UpperCamelCase__ )
A__ : str = [75, 75, 17, 63, 17]
A__ : Tuple = torch.tensor([3_3_5.0_6_0_9, 7_9.3_8_4_8, 3_7_5.4_2_1_6, 1_8_7.2_4_9_5] ).to(UpperCamelCase__ )
self.assertEqual(len(results['''scores'''] ) , 5 )
self.assertTrue(torch.allclose(results['''scores'''] , UpperCamelCase__ , atol=1e-4 ) )
self.assertSequenceEqual(results['''labels'''].tolist() , UpperCamelCase__ )
self.assertTrue(torch.allclose(results['''boxes'''][0, :] , UpperCamelCase__ ) )
| 55
| 1
|
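The expected sequence length used throughout those tests is just the patch count plus the [CLS] token plus the detection tokens; with the tester defaults above:

image_size, patch_size, num_detection_tokens = (30, 30), 2, 8
num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
expected_seq_len = num_patches + 1 + num_detection_tokens  # [CLS] adds the 1
print(expected_seq_len)  # 15 * 15 + 1 + 8 = 234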
from __future__ import annotations

import bisect


def bisect_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Return the leftmost index at which `item` can be inserted to keep the list sorted."""
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def bisect_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Return the rightmost index at which `item` can be inserted to keep the list sorted."""
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def insort_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    """Insert `item` before any existing equal entries."""
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    """Insert `item` after any existing equal entries."""
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)


def binary_search(sorted_collection: list[int], item: int) -> int | None:
    """Iterative binary search; returns the index of `item`, or None if it is absent."""
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection: list[int], item: int) -> int | None:
    """Binary search built on the standard-library `bisect` module."""
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(sorted_collection: list[int], item: int, left: int, right: int) -> int | None:
    """Recursive binary search over sorted_collection[left:right + 1]."""
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)


if __name__ == "__main__":
    user_input = input('Enter numbers separated by comma:\n').strip()
    collection = sorted(int(item) for item in user_input.split(','))
    target = int(input('Enter a single number to be found in the list:\n'))
    result = binary_search(collection, target)
    if result is None:
        print(f"""{target} was not found in {collection}.""")
    else:
        print(f"""{target} was found at position {result} in {collection}.""")
def fibonacci(n: int) -> int:
    """Return the n-th Fibonacci number (1-indexed: fibonacci(1) == 0, fibonacci(2) == 1)."""
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """Return the index of the first Fibonacci number with at least n digits."""
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    """Project Euler 25: index of the first Fibonacci number with n digits."""
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
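

# --- Editor's addition: a quick sanity check of the indexing used above (call
# _check() manually; F(12) = 144 is the first Fibonacci number with three digits,
# so the answer for n = 3 is index 12).
def _check() -> None:
    assert solution(3) == 12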
def check_cycle(graph: dict) -> bool:
    """Return True if the directed graph (an adjacency-list dict) contains a cycle."""
    visited: set[int] = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set[int] = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph)


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    """Recursive DFS; reports True when a back edge (a cycle) is reachable from `vertex`."""
    visited.add(vertex)
    rec_stk.add(vertex)
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True
    # The node needs to be removed from the recursion stack before the function ends
    rec_stk.remove(vertex)
    return False


if __name__ == "__main__":
    from doctest import testmod

    testmod()
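

# --- Editor's addition: quick self-checks (0 -> 1 -> 2 -> 0 is a cycle; a simple
# chain terminating at 2 is not).
assert check_cycle({0: [1], 1: [2], 2: [0]}) is True
assert check_cycle({0: [1], 1: [2], 2: []}) is False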
_SCREAMING_SNAKE_CASE : List[str] = range(2, 2_0 + 1)
_SCREAMING_SNAKE_CASE : Optional[Any] = [1_0**k for k in range(ks[-1] + 1)]
_SCREAMING_SNAKE_CASE : dict[int, dict[int, list[list[int]]]] = {}
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any , __UpperCamelCase : Dict , __UpperCamelCase : Any , __UpperCamelCase : Optional[Any] ) -> int:
"""simple docstring"""
A__ : Tuple = sum(a_i[j] for j in range(__UpperCamelCase , len(__UpperCamelCase ) ) )
A__ : Tuple = sum(a_i[j] * base[j] for j in range(min(len(__UpperCamelCase ) , __UpperCamelCase ) ) )
A__ , A__ : Optional[int] = 0, 0
A__ : List[Any] = n - i
A__ : Any = memo.get(__UpperCamelCase )
if sub_memo is not None:
A__ : Optional[int] = sub_memo.get(__UpperCamelCase )
if jumps is not None and len(__UpperCamelCase ) > 0:
# find and make the largest jump without going over
A__ : List[Any] = -1
for _k in range(len(__UpperCamelCase ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
A__ : List[str] = _k
break
if max_jump >= 0:
A__ , A__ , A__ : List[Any] = jumps[max_jump]
# since the difference between jumps is cached, add c
A__ : int = diff + c
for j in range(min(__UpperCamelCase , len(__UpperCamelCase ) ) ):
A__ , A__ : List[str] = divmod(__UpperCamelCase , 10 )
if new_c > 0:
add(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
else:
A__ : List[Any] = []
else:
A__ : Optional[Any] = {c: []}
A__ : int = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
A__ , A__ : str = next_term(__UpperCamelCase , k - 1 , i + dn , __UpperCamelCase )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
A__ , A__ : str = compute(__UpperCamelCase , __UpperCamelCase , i + dn , __UpperCamelCase )
diff += _diff
dn += terms_jumped
A__ : str = sub_memo[c]
# keep jumps sorted by # of terms skipped
A__ : List[Any] = 0
while j < len(__UpperCamelCase ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(__UpperCamelCase , (diff, dn, k) )
return (diff, dn)
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[str] , __UpperCamelCase : int ) -> Any:
"""simple docstring"""
if i >= n:
return 0, i
if k > len(__UpperCamelCase ):
a_i.extend([0 for _ in range(k - len(__UpperCamelCase ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
A__ : Optional[Any] = i
A__ , A__ , A__ : Dict = 0, 0, 0
for j in range(len(__UpperCamelCase ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
A__ : int = ds_c + ds_b
diff += addend
A__ : List[Any] = 0
for j in range(__UpperCamelCase ):
A__ : Optional[Any] = a_i[j] + addend
A__ , A__ : List[str] = divmod(__UpperCamelCase , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return diff, i - start_i
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Dict , __UpperCamelCase : List[Any] , __UpperCamelCase : int ) -> Tuple:
"""simple docstring"""
for j in range(__UpperCamelCase , len(__UpperCamelCase ) ):
A__ : Any = digits[j] + addend
if s >= 10:
A__ , A__ : Union[str, Any] = divmod(__UpperCamelCase , 10 )
A__ : Optional[int] = addend // 10 + quotient
else:
A__ : Any = s
A__ : Dict = addend // 10
if addend == 0:
break
while addend > 0:
A__ , A__ : Dict = divmod(__UpperCamelCase , 10 )
digits.append(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int = 10**15 ) -> int:
"""simple docstring"""
A__ : List[Any] = [1]
A__ : Dict = 1
A__ : Tuple = 0
while True:
A__ , A__ : List[str] = next_term(__UpperCamelCase , 20 , i + dn , __UpperCamelCase )
dn += terms_jumped
if dn == n - i:
break
A__ : List[str] = 0
for j in range(len(__UpperCamelCase ) ):
a_n += digits[j] * 10**j
return a_n
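

# --- Editor's addition: a brute-force cross-check of the same recurrence
# (a(0) = 1, a(i+1) = a(i) + digit_sum(a(i)), as in Project Euler 551). It is only
# practical for small n, and whether it matches solution(n) exactly depends on the
# indexing above, so treat it as a reference for the recurrence rather than a
# drop-in equivalent.
def solution_naive(n: int) -> int:
    a = 1
    for _ in range(n):
        a += sum(int(digit) for digit in str(a))
    return a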
if __name__ == "__main__":
print(f"""{solution() = }""")
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class InstructBlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)

        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        if images is None and text is None:
            raise ValueError('You have to specify at least images or text.')

        encoding = BatchFeature()

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )
            encoding["qformer_input_ids"] = qformer_text_encoding.pop('input_ids')
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop('attention_mask')

        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)

        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(F"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, 'qformer_tokenizer')
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder='qformer_tokenizer')
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
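

# --- Editor's addition: a minimal usage sketch (the checkpoint name below is an
# assumption for illustration, not taken from this file):
#
#   processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-vicuna-7b")
#   inputs = processor(images=image, text="What is unusual about this image?", return_tensors="pt")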
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key: str, default: bool = False) -> bool:
    """Read a boolean flag from the environment, falling back to `default`."""
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(F"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env('RUN_SLOW', default=False)
def skip(test_case):
    """Decorator that skips a test unconditionally."""
    return unittest.skip('Test was skipped')(test_case)


def slow(test_case):
    """Decorator marking a test as slow; skipped unless RUN_SLOW=1 is set."""
    return unittest.skipUnless(_run_slow_tests, 'test is slow')(test_case)


def require_cpu(test_case):
    return unittest.skipUnless(not torch.cuda.is_available(), 'test requires only a CPU')(test_case)


def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available(), 'test requires a GPU')(test_case)


def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available(), 'test requires a XPU')(test_case)


def require_mps(test_case):
    return unittest.skipUnless(is_mps_available(), 'test requires a `mps` backend support in `torch`')(test_case)


def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), 'test requires the Hugging Face suite')(test_case)


def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available(), 'test requires the bitsandbytes library')(test_case)


def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), 'test requires TPU')(test_case)


def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1, 'test requires a GPU')(test_case)


def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1, 'test requires a XPU')(test_case)


def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, 'test requires multiple GPUs')(test_case)


def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1, 'test requires multiple XPUs')(test_case)


def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available(), 'test requires safetensors')(test_case)


def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available(), 'test requires DeepSpeed')(test_case)


def require_fsdp(test_case):
    return unittest.skipUnless(is_torch_version('>=', '1.12.0'), 'test requires torch version >= 1.12.0')(test_case)


def require_torch_min_version(test_case=None, version=None):
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version('>=', version), F"test requires torch version >= {version}")(test_case)


def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available(), 'test requires Tensorboard')(test_case)


def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available(), 'test requires wandb')(test_case)


def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), 'test requires comet_ml')(test_case)
_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)


def require_trackers(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        'test requires at least one tracker to be available and for `comet_ml` to not be installed', )(test_case)
class TempDirTestCase(unittest.TestCase):
    """A TestCase that keeps one temporary directory open for the whole class and,
    by default, wipes its contents between tests."""

    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob('**/*'):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)


class AccelerateTestCase(unittest.TestCase):
    """A TestCase that resets the accelerator state between tests so that no state
    is carried over from one test to the next."""

    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()


class MockingTestCase(unittest.TestCase):
    """A TestCase helper that registers mocks and guarantees they are stopped on cleanup."""

    def add_mocks(self, mocks: Union[mock.Mock, List[mock.Mock]]):
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)


def are_the_same_tensors(tensor) -> bool:
    """Gather `tensor` across processes and check that every rank holds the same values."""
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print('\nRunning: ', ' '.join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env, )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode('utf-8').rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label='stdout:'))),
            asyncio.create_task(_read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label='stderr:'))),
        ], timeout=timeout, )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo))

    cmd_str = ' '.join(cmd)
    if result.returncode > 0:
        stderr = '\n'.join(result.stderr)
        raise RuntimeError(
            F"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            F"The combined stderr from workers follows:\n{stderr}")

    return result
class SubprocessCallException(Exception):
    pass


def run_command(command, return_stdout=False):
    """Run `command` via subprocess.check_output; optionally return its decoded stdout.
    Raises SubprocessCallException with the captured output on failure."""
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, 'decode'):
                output = output.decode('utf-8')
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            F"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}") from e
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {
    'configuration_swinv2': ['SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Swinv2Config'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_swinv2'] = [
        'SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST',
        'Swinv2ForImageClassification',
        'Swinv2ForMaskedImageModeling',
        'Swinv2Model',
        'Swinv2PreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import numpy as np

SQUARE = [
    ['a', 'b', 'c', 'd', 'e'],
    ['f', 'g', 'h', 'i', 'k'],
    ['l', 'm', 'n', 'o', 'p'],
    ['q', 'r', 's', 't', 'u'],
    ['v', 'w', 'x', 'y', 'z'],
]


class BifidCipher:
    def __init__(self):
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        """Return the 1-based (row, column) coordinates of `letter` in the Polybius square."""
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        """Return the letter at the 1-based (row, column) coordinates in the square."""
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message: str) -> str:
        """Encode `message` with the bifid cipher (spaces dropped, 'j' folded into 'i')."""
        message = message.lower()
        message = message.replace(' ', '')
        message = message.replace('j', 'i')

        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        second_step = first_step.reshape(2 * len(message))
        encoded_message = ''
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter

        return encoded_message

    def decode(self, message: str) -> str:
        """Decode a bifid-encoded `message` back to plaintext."""
        message = message.lower()
        message = message.replace(' ', '')

        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]

        second_step = first_step.reshape((2, len(message)))
        decoded_message = ''
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter

        return decoded_message
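

# --- Editor's addition: round-trip self-check (spaces are dropped and 'j' folds
# into 'i' by design, so a plain lowercase message survives a full round trip).
_cipher = BifidCipher()
assert _cipher.decode(_cipher.encode('testmessage')) == 'testmessage'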
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implemented only because Lightning requires a forward method
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
) -> None:
    """Load a PyTorch Lightning QA checkpoint and export it as a LongformerForQuestionAnswering model."""
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device('cpu'))
    lightning_model.load_state_dict(ckpt['state_dict'])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(F"Conversion successful. Model saved under {pytorch_dump_folder_path}")
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--longformer_model',
default=None,
type=str,
required=True,
help='model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.',
)
parser.add_argument(
'--longformer_question_answering_ckpt_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch Lightning Checkpoint.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
_SCREAMING_SNAKE_CASE : List[str] = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
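
# --- Editor's addition: example invocation (script, checkpoint, and directory names
# are illustrative, not taken from this file):
#
#   python convert_longformer_original_pytorch_lightning_to_pytorch.py \
#       --longformer_model longformer-base-4096 \
#       --longformer_question_answering_ckpt_path ./checkpoints/qa.ckpt \
#       --pytorch_dump_folder_path ./longformer-base-4096-qa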
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
_SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):

    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """This __init__ exists for legacy code; it translates deprecated `no_*` flags
        into their positive counterparts."""
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    F"{deprecated_arg} is deprecated. Please use --no-{positive_arg} or"
                    F" {positive_arg}={kwargs[positive_arg]}")

        self.tpu_name = kwargs.pop('tpu_name', self.tpu_name)
        self.device_idx = kwargs.pop('device_idx', self.device_idx)
        self.eager_mode = kwargs.pop('eager_mode', self.eager_mode)
        self.use_xla = kwargs.pop('use_xla', self.use_xla)
        super().__init__(**kwargs)
    tpu_name: str = field(
        default=None, metadata={"help": "Name of TPU"}, )
    device_idx: int = field(
        default=0, metadata={"help": "CPU / GPU device index. Defaults to 0."}, )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager model."})
    use_xla: bool = field(
        default=False, metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        }, )

    @cached_property
    def _setup_tpu(self):
        requires_backends(self, ['tf'])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy(self):
        requires_backends(self, ['tf'])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)

            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], 'GPU')
                strategy = tf.distribute.OneDeviceStrategy(device=F"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], 'GPU')  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=F"/cpu:{self.device_idx}")

        return strategy

    @property
    def is_tpu(self) -> bool:
        requires_backends(self, ['tf'])
        return self._setup_tpu is not None

    @property
    def strategy(self):
        requires_backends(self, ['tf'])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ['tf'])
        return tf.config.list_physical_devices('GPU')

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ['tf'])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
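

# --- Editor's addition: a standalone sketch of the deprecated-flag translation
# performed in __init__ above (a hypothetical helper for illustration, not part of
# the HF API): "no_cuda=True" becomes "cuda=False".
def _translate_deprecated_args(kwargs: dict) -> dict:
    out = dict(kwargs)
    for arg in ("no_inference", "no_cuda", "no_tpu", "no_speed", "no_memory", "no_env_print", "no_multi_process"):
        if arg in out:
            out[arg[3:]] = not out.pop(arg)
    return out


assert _translate_deprecated_args({"no_cuda": True, "eager_mode": False}) == {"cuda": False, "eager_mode": False}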
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Build the MRPC train/eval dataloaders, tokenizing on the main process first."""
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    datasets = load_dataset('glue', 'mrpc')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'], )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding='longest', max_length=max_length, pad_to_multiple_of=pad_to_multiple_of,
            return_tensors='pt', )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    # For testing only
    if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
        config['num_epochs'] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    metric = evaluate.load('glue', 'mrpc')

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps, )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather((predictions, batch['labels']))
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions, references=references, )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(F"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description='Simple example of training script.')
    parser.add_argument(
        '--mixed_precision', type=str, default=None, choices=['no', 'fp16', 'bf16', 'fp8'], help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.', )
    parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.')
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
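
# --- Editor's addition: typical launch commands (the script name is illustrative):
#
#   python multi_process_metrics.py                       # single process
#   accelerate launch multi_process_metrics.py            # uses the saved accelerate config
#   accelerate launch --num_processes 2 multi_process_metrics.py --mixed_precision fp16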
def is_pangram(input_str: str = "The quick brown fox jumps over the lazy dog", ) -> bool:
    """Set-based check: collect every distinct letter and compare against 26."""
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(' ', '')
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26


def is_pangram_faster(input_str: str = "The quick brown fox jumps over the lazy dog", ) -> bool:
    """Flag-array check over the 26 letters."""
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - ord("a")] = True
        elif char.isupper():
            flag[ord(char) - ord("A")] = True
    return all(flag)


def is_pangram_fastest(input_str: str = "The quick brown fox jumps over the lazy dog", ) -> bool:
    """One-liner using a set comprehension over the alphabetic characters."""
    return len({char for char in input_str.lower() if char.isalpha()}) == 26


def benchmark() -> None:
    """Benchmark the three implementations against each other."""
    from timeit import timeit

    setup = 'from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest'
    print(timeit('is_pangram()', setup=setup))
    print(timeit('is_pangram_faster()', setup=setup))
    print(timeit('is_pangram_fastest()', setup=setup))
    # 5.348480500048026, 2.6477354579837993, 1.8470395830227062
    # 5.036091582966037, 2.644472333951853, 1.8869528750656173


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
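

# --- Editor's addition: quick self-checks for the three variants.
assert is_pangram() and is_pangram_faster() and is_pangram_fastest()
assert not is_pangram("abcdefghijklm")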
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool(PipelineTool):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan

    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        if self.post_processor is None:
            self.post_processor = 'microsoft/speecht5_hifigan'
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors='pt', truncation=True)

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError('Datasets needs to be installed if not passing speaker embeddings.')

            embeddings_dataset = load_dataset('Matthijs/cmu-arctic-xvectors', split='validation')
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]['xvector']).unsqueeze(0)

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
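

# --- Editor's addition: a minimal usage sketch (the call pattern below assumes the
# standard callable Tool interface from transformers.tools; when no speaker
# embedding is given, encode() falls back to the CMU Arctic x-vectors dataset):
#
#   reader = TextToSpeechTool()
#   reader.setup()
#   audio = reader("Hello, world.")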
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNet2DModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    """Resize to a multiple of 32, scale to [-1, 1], and convert to an NCHW torch tensor."""
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION['lanczos'])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
class LDMSuperResolutionPipeline(DiffusionPipeline):
    def __init__(
        self,
        vqvae: VQModel,
        unet: UNet2DModel,
        scheduler: Union[
            DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler,
            EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler,
        ],
    ):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self, image=None, batch_size=1, num_inference_steps=100, eta=0.0, generator=None,
        output_type="pil", return_dict=True, ):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(F"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")

        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype

        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)

        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs['eta'] = eta

        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
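

# --- Editor's addition: a minimal usage sketch (mirrors the diffusers LDM
# super-resolution example; the checkpoint name is an assumption, not taken from
# this file):
#
#   from diffusers import LDMSuperResolutionPipeline
#   import PIL.Image
#   pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
#   low_res = PIL.Image.open("low_res.png").convert("RGB")
#   upscaled = pipe(low_res, num_inference_steps=100, eta=1.0).images[0]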
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'tokenizer_file': {
        'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'gpt-neox-20b': 2048,
}


class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>",
        bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs, ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token,
            eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs, )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type'))
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
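

# --- Editor's addition: minimal usage sketch (the model id comes from the
# PRETRAINED_VOCAB_FILES_MAP above; the round trip is illustrative):
#
#   tok = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
#   ids = tok("Hello world").input_ids
#   text = tok.decode(ids)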
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    """Build the (old_name, new_name) pairs used to rename checkpoint keys."""
    prefix = 'backbone.' if is_semantic else ''

    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"{prefix}blocks.{i}.norm1.weight", F"beit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"{prefix}blocks.{i}.norm1.bias", F"beit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"{prefix}blocks.{i}.attn.proj.weight", F"beit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(F"{prefix}blocks.{i}.attn.proj.bias", F"beit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"{prefix}blocks.{i}.norm2.weight", F"beit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"{prefix}blocks.{i}.norm2.bias", F"beit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.weight", F"beit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.bias", F"beit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.weight", F"beit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.bias", F"beit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
(F"{prefix}cls_token", '''beit.embeddings.cls_token'''),
(F"{prefix}patch_embed.proj.weight", '''beit.embeddings.patch_embeddings.projection.weight'''),
(F"{prefix}patch_embed.proj.bias", '''beit.embeddings.patch_embeddings.projection.bias'''),
(F"{prefix}pos_embed", '''beit.embeddings.position_embeddings'''),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
('''mask_token''', '''beit.embeddings.mask_token'''),
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
('''fc_norm.weight''', '''beit.pooler.layernorm.weight'''),
('''fc_norm.bias''', '''beit.pooler.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    """Split each block's fused qkv projection into separate query/key/value tensors
    and move the gamma scaling parameters to their `lambda` names."""
    for i in range(config.num_hidden_layers):
        prefix = 'backbone.' if is_semantic else ''
        # queries, keys and values
        in_proj_weight = state_dict.pop(F"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(F"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(F"{prefix}blocks.{i}.attn.v_bias")

        state_dict[F"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[F"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(F"{prefix}blocks.{i}.gamma_1")
        gamma_2 = state_dict.pop(F"{prefix}blocks.{i}.gamma_2")
        state_dict[F"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[F"beit.encoder.layer.{i}.lambda_2"] = gamma_2


def rename_key(dct, old, new):
    """Pop `old` from the dict and re-insert its value under `new`."""
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    """Fetch the standard COCO cats test image used to verify conversions."""
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the original DiT weights into the HuggingFace BEiT structure."""
    has_lm_head = False if 'rvlcdip' in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = 'huggingface/label-files'
        filename = 'rvlcdip-id2label.json'
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu')['model']

    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False)
    image = prepare_img()

    encoding = image_processor(images=image, return_tensors='pt')
    pixel_values = encoding['pixel_values']

    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 16] if 'rvlcdip' in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(F"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(F"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = 'dit-base' if 'base' in checkpoint_url else 'dit-large'
        else:
            model_name = 'dit-base-finetuned-rvlcdip' if 'dit-b' in checkpoint_url else 'dit-large-finetuned-rvlcdip'
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization='nielsr',
            commit_message='Add image processor', use_temp_dir=True, )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization='nielsr',
            commit_message='Add model', use_temp_dir=True, )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_url',
default='https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth',
type=str,
help='URL to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
_SCREAMING_SNAKE_CASE : str = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
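
# --- Editor's addition: example invocation (script name and output path are
# illustrative; the checkpoint URL is the default from above):
#
#   python convert_dit_unilm_to_pytorch.py \
#       --checkpoint_url "https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth" \
#       --pytorch_dump_folder_path ./dit-base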
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : int = {
'SenseTime/deformable-detr': 'https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = "deformable_detr"
_lowerCAmelCase = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self , UpperCamelCase__=True , UpperCamelCase__=None , UpperCamelCase__=3 , UpperCamelCase__=300 , UpperCamelCase__=1024 , UpperCamelCase__=6 , UpperCamelCase__=1024 , UpperCamelCase__=8 , UpperCamelCase__=6 , UpperCamelCase__=1024 , UpperCamelCase__=8 , UpperCamelCase__=0.0 , UpperCamelCase__=True , UpperCamelCase__="relu" , UpperCamelCase__=256 , UpperCamelCase__=0.1 , UpperCamelCase__=0.0 , UpperCamelCase__=0.0 , UpperCamelCase__=0.0_2 , UpperCamelCase__=1.0 , UpperCamelCase__=True , UpperCamelCase__=False , UpperCamelCase__="sine" , UpperCamelCase__="resnet50" , UpperCamelCase__=True , UpperCamelCase__=False , UpperCamelCase__=4 , UpperCamelCase__=4 , UpperCamelCase__=4 , UpperCamelCase__=False , UpperCamelCase__=300 , UpperCamelCase__=False , UpperCamelCase__=1 , UpperCamelCase__=5 , UpperCamelCase__=2 , UpperCamelCase__=1 , UpperCamelCase__=1 , UpperCamelCase__=5 , UpperCamelCase__=2 , UpperCamelCase__=0.1 , UpperCamelCase__=0.2_5 , UpperCamelCase__=False , **UpperCamelCase__ , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
A__ : int = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A__ : Union[str, Any] = backbone_config.get('''model_type''' )
A__ : Union[str, Any] = CONFIG_MAPPING[backbone_model_type]
A__ : Optional[int] = config_class.from_dict(UpperCamelCase__ )
A__ : Tuple = use_timm_backbone
A__ : int = backbone_config
A__ : List[Any] = num_channels
A__ : List[Any] = num_queries
A__ : str = max_position_embeddings
A__ : Tuple = d_model
A__ : int = encoder_ffn_dim
A__ : Union[str, Any] = encoder_layers
A__ : Optional[Any] = encoder_attention_heads
A__ : List[Any] = decoder_ffn_dim
A__ : Tuple = decoder_layers
A__ : Optional[Any] = decoder_attention_heads
A__ : List[str] = dropout
A__ : str = attention_dropout
A__ : List[Any] = activation_dropout
A__ : Any = activation_function
A__ : Optional[Any] = init_std
A__ : Union[str, Any] = init_xavier_std
A__ : Union[str, Any] = encoder_layerdrop
A__ : Optional[int] = auxiliary_loss
A__ : str = position_embedding_type
A__ : List[Any] = backbone
A__ : Optional[Any] = use_pretrained_backbone
A__ : Any = dilation
# deformable attributes
A__ : List[Any] = num_feature_levels
A__ : List[str] = encoder_n_points
A__ : int = decoder_n_points
A__ : List[Any] = two_stage
A__ : Dict = two_stage_num_proposals
A__ : Optional[int] = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError('''If two_stage is True, with_box_refine must be True.''' )
# Hungarian matcher
A__ : List[str] = class_cost
A__ : List[Any] = bbox_cost
A__ : Any = giou_cost
# Loss coefficients
A__ : List[str] = mask_loss_coefficient
A__ : Union[str, Any] = dice_loss_coefficient
A__ : List[Any] = bbox_loss_coefficient
A__ : Tuple = giou_loss_coefficient
A__ : Optional[Any] = eos_coefficient
A__ : List[Any] = focal_alpha
A__ : List[str] = disable_custom_kernels
super().__init__(is_encoder_decoder=UpperCamelCase__ , **UpperCamelCase__ )
@property
def __snake_case ( self ):
return self.encoder_attention_heads
@property
def __snake_case ( self ):
return self.d_model
def __snake_case ( self ):
A__ : List[str] = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
A__ : Tuple = self.backbone_config.to_dict()
A__ : Optional[int] = self.__class__.model_type
return output
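# Minimal usage sketch for the Deformable DETR configuration above (the class name
# is obfuscated in this sample; `DeformableDetrConfig` is its upstream name, an
# assumption here):
#
#   config = DeformableDetrConfig(num_queries=300, two_stage=False)
#   assert config.hidden_size == config.d_model  # resolved via the attribute map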
| 55
| 1
|
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def __snake_case ( self ):
for model_name in ["bert-base-cased", "bert-large-uncased"]:
with self.subTest(UpperCamelCase__ ):
A__ : str = AutoConfig.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
A__ : Optional[Any] = FlaxAutoModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
@slow
def __snake_case ( self ):
for model_name in ["roberta-base", "roberta-large"]:
with self.subTest(UpperCamelCase__ ):
A__ : Dict = AutoConfig.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
A__ : Optional[int] = FlaxAutoModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
@slow
def __snake_case ( self ):
for model_name in ["bert-base-cased", "bert-large-uncased"]:
A__ : str = AutoTokenizer.from_pretrained(UpperCamelCase__ )
A__ : Optional[Any] = FlaxBertModel.from_pretrained(UpperCamelCase__ )
A__ : Union[str, Any] = tokenizer('''Do you support jax jitted function?''' , return_tensors=TensorType.JAX )
@jax.jit
def eval(**UpperCamelCase__ ):
return model(**UpperCamelCase__ )
eval(**UpperCamelCase__ ).block_until_ready()
@slow
def __snake_case ( self ):
for model_name in ["roberta-base", "roberta-large"]:
A__ : Any = AutoTokenizer.from_pretrained(UpperCamelCase__ )
A__ : List[str] = FlaxRobertaModel.from_pretrained(UpperCamelCase__ )
A__ : Optional[int] = tokenizer('''Do you support jax jitted function?''' , return_tensors=TensorType.JAX )
@jax.jit
def eval(**UpperCamelCase__ ):
return model(**UpperCamelCase__ )
eval(**UpperCamelCase__ ).block_until_ready()
def __snake_case ( self ):
with self.assertRaisesRegex(
UpperCamelCase__ , '''bert-base is not a local folder and is not a valid model identifier''' ):
A__ : Union[str, Any] = FlaxAutoModel.from_pretrained('''bert-base''' )
def __snake_case ( self ):
with self.assertRaisesRegex(
UpperCamelCase__ , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
A__ : Dict = FlaxAutoModel.from_pretrained(UpperCamelCase__ , revision='''aaaaaa''' )
def __snake_case ( self ):
with self.assertRaisesRegex(
UpperCamelCase__ , '''hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack''' , ):
A__ : Any = FlaxAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' )
def __snake_case ( self ):
with self.assertRaisesRegex(UpperCamelCase__ , '''Use `from_pt=True` to load this model''' ):
A__ : Optional[int] = FlaxAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' )
| 55
|
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> List[Any]:
"""simple docstring"""
A__ : Optional[Any] = 0
A__ : Optional[Any] = len(__UpperCamelCase )
for i in range(n - 1 ):
for j in range(i + 1 , __UpperCamelCase ):
if arr[i] > arr[j]:
num_inversions += 1
return num_inversions
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Tuple:
"""simple docstring"""
if len(__UpperCamelCase ) <= 1:
return arr, 0
A__ : Optional[int] = len(__UpperCamelCase ) // 2
A__ : List[str] = arr[0:mid]
A__ : Union[str, Any] = arr[mid:]
A__ , A__ : List[Any] = count_inversions_recursive(__UpperCamelCase )
A__ , A__ : int = count_inversions_recursive(__UpperCamelCase )
A__ , A__ : Dict = _count_cross_inversions(__UpperCamelCase , __UpperCamelCase )
A__ : Any = inversion_p + inversions_q + cross_inversions
return c, num_inversions
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[str] , __UpperCamelCase : List[Any] ) -> Dict:
"""simple docstring"""
A__ : str = []
A__ : Tuple = 0
while i < len(__UpperCamelCase ) and j < len(__UpperCamelCase ):
if p[i] > q[j]:
# if p[i] > q[j], then p[k] > q[j] for all i <= k < len(p),
# so every remaining element of p forms an inversion with q[j].
# This follows from p being sorted.
num_inversion += len(__UpperCamelCase ) - i
r.append(q[j] )
j += 1
else:
r.append(p[i] )
i += 1
if i < len(__UpperCamelCase ):
r.extend(p[i:] )
else:
r.extend(q[j:] )
return r, num_inversion
def SCREAMING_SNAKE_CASE ( ) -> Tuple:
"""simple docstring"""
A__ : List[str] = [10, 2, 1, 5, 5, 2, 11]
# this arr has 8 inversions:
# (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
A__ : int = count_inversions_bf(__UpperCamelCase )
A__ , A__ : int = count_inversions_recursive(__UpperCamelCase )
assert num_inversions_bf == num_inversions_recursive == 8
print('''number of inversions = ''' , __UpperCamelCase )
# testing an array with zero inversion (a sorted arr_1)
arr_a.sort()
A__ : Optional[Any] = count_inversions_bf(__UpperCamelCase )
A__ , A__ : Dict = count_inversions_recursive(__UpperCamelCase )
assert num_inversions_bf == num_inversions_recursive == 0
print('''number of inversions = ''' , __UpperCamelCase )
# an empty list should also have zero inversions
A__ : Union[str, Any] = []
A__ : Union[str, Any] = count_inversions_bf(__UpperCamelCase )
A__ , A__ : Any = count_inversions_recursive(__UpperCamelCase )
assert num_inversions_bf == num_inversions_recursive == 0
print('''number of inversions = ''' , __UpperCamelCase )
if __name__ == "__main__":
main()
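# Worked example: for arr = [3, 1, 2] the inverted pairs are (3, 1) and (3, 2), so
# both counters return 2.  The brute force is O(n^2), while the recursive version is
# O(n log n) because each _count_cross_inversions merge is linear in len(p) + len(q).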
| 55
| 1
|
from PIL import Image
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Image , __UpperCamelCase : float ) -> Image:
"""simple docstring"""
def brightness(__UpperCamelCase : int ) -> float:
return 1_28 + level + (c - 1_28)
if not -2_5_5.0 <= level <= 2_5_5.0:
raise ValueError('''level must be between -255.0 (black) and 255.0 (white)''' )
return img.point(__UpperCamelCase )
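# Note: 128 + level + (c - 128) algebraically reduces to c + level; PIL's
# Image.point then clamps each resulting channel value into the 0-255 byte range,
# so e.g. level = 100 maps a pixel value of 200 to 255 (saturation).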
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change brightness to 100
_SCREAMING_SNAKE_CASE : Dict = change_brightness(img, 1_0_0)
brigt_img.save('image_data/lena_brightness.png', format='png')
| 55
| 1
|
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int = 10_00 ) -> int:
"""simple docstring"""
A__ , A__ : List[str] = 1, 1
A__ : List[Any] = []
for i in range(1 , n + 1 ):
A__ : List[str] = prev_numerator + 2 * prev_denominator
A__ : Tuple = prev_numerator + prev_denominator
if len(str(__UpperCamelCase ) ) > len(str(__UpperCamelCase ) ):
result.append(__UpperCamelCase )
A__ : Optional[Any] = numerator
A__ : Any = denominator
return len(__UpperCamelCase )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 55
|
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class UpperCamelCase__ :
'''simple docstring'''
_lowerCAmelCase = None
def __snake_case ( self ):
A__ : Dict = self.feature_extraction_class(**self.feat_extract_dict )
A__ : Tuple = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , UpperCamelCase__ )
def __snake_case ( self ):
A__ : Any = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Any = os.path.join(UpperCamelCase__ , '''feat_extract.json''' )
feat_extract_first.to_json_file(UpperCamelCase__ )
A__ : Dict = self.feature_extraction_class.from_json_file(UpperCamelCase__ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def __snake_case ( self ):
A__ : Any = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Any = feat_extract_first.save_pretrained(UpperCamelCase__ )[0]
check_json_file_has_correct_format(UpperCamelCase__ )
A__ : Optional[int] = self.feature_extraction_class.from_pretrained(UpperCamelCase__ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def __snake_case ( self ):
A__ : str = self.feature_extraction_class()
self.assertIsNotNone(UpperCamelCase__ )
| 55
| 1
|
_SCREAMING_SNAKE_CASE : str = [0, 2, 4, 6, 8]
_SCREAMING_SNAKE_CASE : int = [1, 3, 5, 7, 9]
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : list[int] , __UpperCamelCase : int ) -> int:
"""simple docstring"""
if remaining_length == 0:
if digits[0] == 0 or digits[-1] == 0:
return 0
for i in range(length // 2 - 1 , -1 , -1 ):
remainder += digits[i] + digits[length - i - 1]
if remainder % 2 == 0:
return 0
remainder //= 10
return 1
if remaining_length == 1:
if remainder % 2 == 0:
return 0
A__ : Tuple = 0
for digit in range(10 ):
A__ : int = digit
result += reversible_numbers(
0 , (remainder + 2 * digit) // 10 , __UpperCamelCase , __UpperCamelCase )
return result
A__ : List[str] = 0
for digita in range(10 ):
A__ : List[Any] = digita
if (remainder + digita) % 2 == 0:
A__ : str = ODD_DIGITS
else:
A__ : Optional[Any] = EVEN_DIGITS
for digita in other_parity_digits:
A__ : Optional[Any] = digita
result += reversible_numbers(
remaining_length - 2 , (remainder + digita + digita) // 10 , __UpperCamelCase , __UpperCamelCase , )
return result
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int = 9 ) -> int:
"""simple docstring"""
A__ : Any = 0
for length in range(1 , max_power + 1 ):
result += reversible_numbers(__UpperCamelCase , 0 , [0] * length , __UpperCamelCase )
return result
if __name__ == "__main__":
print(f"""{solution() = }""")
| 55
|
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_SCREAMING_SNAKE_CASE : Union[str, Any] = '\\n@inproceedings{snover-etal-2006-study,\n title = "A Study of Translation Edit Rate with Targeted Human Annotation",\n author = "Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John",\n booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",\n month = aug # " 8-12",\n year = "2006",\n address = "Cambridge, Massachusetts, USA",\n publisher = "Association for Machine Translation in the Americas",\n url = "https://aclanthology.org/2006.amta-papers.25",\n pages = "223--231",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_SCREAMING_SNAKE_CASE : Tuple = '\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n'
_SCREAMING_SNAKE_CASE : Optional[Any] = '\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n    predictions (list of str): The system stream (a sequence of segments).\n    references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n    normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n    ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n    support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n        as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n        Only applies if `normalized = True`. Defaults to `False`.\n    case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n    \'score\' (float): TER score (num_edits / sum_ref_lengths * 100)\n    \'num_edits\' (int): The cumulative number of edits\n    \'ref_length\' (float): The cumulative average reference length\n\nExamples:\n    Example 1:\n        >>> predictions = ["does this sentence match??",\n        ...                    "what about this sentence?",\n        ...                    "What did the TER metric user say to the developer?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n        ...             ["Your jokes are...", "...TERrible"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}\n\n    Example 2:\n        >>> predictions = ["does this sentence match??",\n        ...                    "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}\n\n    Example 3:\n        >>> predictions = ["does this sentence match??",\n        ...                    "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         normalized=True,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}\n\n    Example 4:\n        >>> predictions = ["does this sentence match??",\n        ...                    "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         ignore_punct=True,\n        ...                         case_sensitive=False)\n        >>> print(results)\n        {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}\n\n    Example 5:\n        >>> predictions = ["does this sentence match??",\n        ...                    "what about this sentence?",\n        ...                    "What did the TER metric user say to the developer?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n        ...             ["Your jokes are...", "...TERrible"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         ignore_punct=True,\n        ...                         case_sensitive=False)\n        >>> print(results)\n        {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class UpperCamelCase__ ( datasets.Metric ):
'''simple docstring'''
def __snake_case ( self ):
if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install "sacrebleu>=1.4.12"`.''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''http://www.cs.umd.edu/~snover/tercom/''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#ter'''] , reference_urls=[
'''https://github.com/jhclark/tercom''',
] , )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = False , UpperCamelCase__ = False , UpperCamelCase__ = False , UpperCamelCase__ = False , ):
A__ : List[Any] = len(references[0] )
if any(len(UpperCamelCase__ ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
A__ : Dict = [[refs[i] for refs in references] for i in range(UpperCamelCase__ )]
A__ : Optional[Any] = TER(
normalized=UpperCamelCase__ , no_punct=UpperCamelCase__ , asian_support=UpperCamelCase__ , case_sensitive=UpperCamelCase__ , )
A__ : str = sb_ter.corpus_score(UpperCamelCase__ , UpperCamelCase__ )
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 55
| 1
|
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[str] ) -> int:
"""simple docstring"""
if (
(cp >= 0X4e00 and cp <= 0X9fff) # CJK Unified Ideographs
or (cp >= 0X3400 and cp <= 0X4dbf) # CJK Unified Ideographs Extension A
or (cp >= 0X20000 and cp <= 0X2a6df) # CJK Unified Ideographs Extension B
or (cp >= 0X2a700 and cp <= 0X2b73f) # CJK Unified Ideographs Extension C
or (cp >= 0X2b740 and cp <= 0X2b81f) # CJK Unified Ideographs Extension D
or (cp >= 0X2b820 and cp <= 0X2ceaf) # CJK Unified Ideographs Extension E
or (cp >= 0Xf900 and cp <= 0Xfaff) # CJK Compatibility Ideographs
or (cp >= 0X2f800 and cp <= 0X2fa1f) # CJK Compatibility Ideographs Supplement
):
return True
return False
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : str ) -> Union[str, Any]:
"""simple docstring"""
for char in word:
A__ : int = ord(__UpperCamelCase )
if not _is_chinese_char(__UpperCamelCase ):
return 0
return 1
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[str] ) -> Optional[int]:
"""simple docstring"""
A__ : Tuple = set()
for token in tokens:
A__ : Any = len(__UpperCamelCase ) > 1 and is_chinese(__UpperCamelCase )
if chinese_word:
word_set.add(__UpperCamelCase )
A__ : Any = list(__UpperCamelCase )
return word_list
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[str] , __UpperCamelCase : set() ) -> Union[str, Any]:
"""simple docstring"""
if not chinese_word_set:
return bert_tokens
A__ : Any = max([len(__UpperCamelCase ) for w in chinese_word_set] )
A__ : Dict = bert_tokens
A__ , A__ : Optional[Any] = 0, len(__UpperCamelCase )
while start < end:
A__ : Dict = True
if is_chinese(bert_word[start] ):
A__ : Tuple = min(end - start , __UpperCamelCase )
for i in range(__UpperCamelCase , 1 , -1 ):
A__ : List[str] = ''''''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
A__ : Dict = '''##''' + bert_word[j]
A__ : str = start + i
A__ : Tuple = False
break
if single_word:
start += 1
return bert_word
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[str] , __UpperCamelCase : LTP , __UpperCamelCase : BertTokenizer ) -> Tuple:
"""simple docstring"""
A__ : Optional[Any] = []
for i in range(0 , len(__UpperCamelCase ) , 1_00 ):
A__ : str = ltp_tokenizer.pipeline(lines[i : i + 1_00] , tasks=['''cws'''] ).cws
A__ : List[Any] = [get_chinese_word(__UpperCamelCase ) for r in res]
ltp_res.extend(__UpperCamelCase )
assert len(__UpperCamelCase ) == len(__UpperCamelCase )
A__ : str = []
for i in range(0 , len(__UpperCamelCase ) , 1_00 ):
A__ : Dict = bert_tokenizer(lines[i : i + 1_00] , add_special_tokens=__UpperCamelCase , truncation=__UpperCamelCase , max_length=5_12 )
bert_res.extend(res['''input_ids'''] )
assert len(__UpperCamelCase ) == len(__UpperCamelCase )
A__ : Tuple = []
for input_ids, chinese_word in zip(__UpperCamelCase , __UpperCamelCase ):
A__ : Optional[int] = []
for id in input_ids:
A__ : List[str] = bert_tokenizer._convert_id_to_token(__UpperCamelCase )
input_tokens.append(__UpperCamelCase )
A__ : List[Any] = add_sub_symbol(__UpperCamelCase , __UpperCamelCase )
A__ : Optional[int] = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(__UpperCamelCase ):
if token[:2] == "##":
A__ : List[str] = token[2:]
# save chinese tokens' pos
if len(__UpperCamelCase ) == 1 and _is_chinese_char(ord(__UpperCamelCase ) ):
ref_id.append(__UpperCamelCase )
ref_ids.append(__UpperCamelCase )
assert len(__UpperCamelCase ) == len(__UpperCamelCase )
return ref_ids
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[str] ) -> str:
"""simple docstring"""
with open(args.file_name , '''r''' , encoding='''utf-8''' ) as f:
A__ : Any = f.readlines()
A__ : List[str] = [line.strip() for line in data if len(__UpperCamelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
A__ : List[Any] = LTP(args.ltp ) # faster in GPU device
A__ : List[Any] = BertTokenizer.from_pretrained(args.bert )
A__ : Optional[Any] = prepare_ref(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
with open(args.save_path , '''w''' , encoding='''utf-8''' ) as f:
A__ : Optional[Any] = [json.dumps(__UpperCamelCase ) + '''\n''' for ref in ref_ids]
f.writelines(__UpperCamelCase )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Tuple = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
required=False,
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp',
required=False,
type=str,
default='./resources/ltp',
help='resources for LTP tokenizer, usually a path',
)
parser.add_argument(
'--bert',
required=False,
type=str,
default='./resources/robert',
help='resources for Bert tokenizer',
)
parser.add_argument(
'--save_path',
required=False,
type=str,
default='./resources/ref.txt',
help='path to save res',
)
_SCREAMING_SNAKE_CASE : int = parser.parse_args()
main(args)
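# Each output line is a JSON list of sub-token positions that continue a Chinese
# whole word, e.g. "[3, 4, 7]".  A sketch of consuming such refs downstream (the
# collator class is from `transformers`; wiring a `chinese_ref` column up to it is
# an assumption about the training script, not shown here):
#
#   from transformers import DataCollatorForWholeWordMask
#   collator = DataCollatorForWholeWordMask(tokenizer=bert_tokenizer, mlm_probability=0.15)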
| 55
|
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
# TODO Update this
_SCREAMING_SNAKE_CASE : Optional[int] = {
'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = "esm"
def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=768 , UpperCamelCase__=12 , UpperCamelCase__=12 , UpperCamelCase__=3072 , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=1026 , UpperCamelCase__=0.0_2 , UpperCamelCase__=1e-12 , UpperCamelCase__="absolute" , UpperCamelCase__=True , UpperCamelCase__=None , UpperCamelCase__=False , UpperCamelCase__=False , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ , ):
super().__init__(pad_token_id=UpperCamelCase__ , mask_token_id=UpperCamelCase__ , **UpperCamelCase__ )
A__ : Optional[Any] = vocab_size
A__ : int = hidden_size
A__ : List[str] = num_hidden_layers
A__ : Tuple = num_attention_heads
A__ : str = intermediate_size
A__ : List[str] = hidden_dropout_prob
A__ : Optional[Any] = attention_probs_dropout_prob
A__ : int = max_position_embeddings
A__ : List[str] = initializer_range
A__ : List[Any] = layer_norm_eps
A__ : int = position_embedding_type
A__ : Optional[Any] = use_cache
A__ : Optional[int] = emb_layer_norm_before
A__ : List[str] = token_dropout
A__ : Tuple = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info('''No esmfold_config supplied for folding model, using default values.''' )
A__ : List[Any] = EsmFoldConfig()
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A__ : Optional[int] = EsmFoldConfig(**UpperCamelCase__ )
A__ : int = esmfold_config
if vocab_list is None:
logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' )
A__ : Any = get_default_vocab_list()
else:
A__ : Dict = vocab_list
else:
A__ : Optional[Any] = None
A__ : Tuple = None
if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , UpperCamelCase__ ):
raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' )
def __snake_case ( self ):
A__ : Optional[int] = super().to_dict()
if isinstance(self.esmfold_config , UpperCamelCase__ ):
A__ : Dict = self.esmfold_config.to_dict()
return output
@dataclass
class UpperCamelCase__ :
'''simple docstring'''
_lowerCAmelCase = None
_lowerCAmelCase = True
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = 0
_lowerCAmelCase = True
_lowerCAmelCase = False
_lowerCAmelCase = 128
_lowerCAmelCase = None
def __snake_case ( self ):
if self.trunk is None:
A__ : Tuple = TrunkConfig()
elif isinstance(self.trunk , UpperCamelCase__ ):
A__ : List[Any] = TrunkConfig(**self.trunk )
def __snake_case ( self ):
A__ : Optional[int] = asdict(self )
A__ : int = self.trunk.to_dict()
return output
@dataclass
class UpperCamelCase__ :
'''simple docstring'''
_lowerCAmelCase = 48
_lowerCAmelCase = 1_024
_lowerCAmelCase = 128
_lowerCAmelCase = 32
_lowerCAmelCase = 32
_lowerCAmelCase = 32
_lowerCAmelCase = 0
_lowerCAmelCase = 0
_lowerCAmelCase = False
_lowerCAmelCase = 4
_lowerCAmelCase = 128
_lowerCAmelCase = None
def __snake_case ( self ):
if self.structure_module is None:
A__ : str = StructureModuleConfig()
elif isinstance(self.structure_module , UpperCamelCase__ ):
A__ : str = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(F"`max_recycles` should be positive, got {self.max_recycles}." )
if self.sequence_state_dim % self.sequence_head_width != 0:
raise ValueError(
'''`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'''
F" {self.sequence_state_dim} and {self.sequence_head_width}." )
if self.pairwise_state_dim % self.pairwise_head_width != 0:
raise ValueError(
'''`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'''
F" {self.pairwise_state_dim} and {self.pairwise_head_width}." )
A__ : Tuple = self.sequence_state_dim // self.sequence_head_width
A__ : int = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
'''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got'''
F" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}." )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
'''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got'''
F" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}." )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}." )
if self.dropout >= 0.4:
raise ValueError(F"`dropout` should not be greater than 0.4, got {self.dropout}." )
def __snake_case ( self ):
A__ : List[Any] = asdict(self )
A__ : Optional[int] = self.structure_module.to_dict()
return output
@dataclass
class UpperCamelCase__ :
'''simple docstring'''
_lowerCAmelCase = 384
_lowerCAmelCase = 128
_lowerCAmelCase = 16
_lowerCAmelCase = 128
_lowerCAmelCase = 12
_lowerCAmelCase = 4
_lowerCAmelCase = 8
_lowerCAmelCase = 0.1
_lowerCAmelCase = 8
_lowerCAmelCase = 1
_lowerCAmelCase = 2
_lowerCAmelCase = 7
_lowerCAmelCase = 10
_lowerCAmelCase = 1e-8
_lowerCAmelCase = 1e5
def __snake_case ( self ):
return asdict(self )
def SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
"""simple docstring"""
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
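# Worked check of the TrunkConfig head arithmetic above, using its defaults:
# sequence_state_dim = 1024 with sequence_head_width = 32 gives 1024 // 32 = 32
# sequence heads, and pairwise_state_dim = 128 with pairwise_head_width = 32 gives
# 128 // 32 = 4 pairwise heads; both products recover the state dims, so the
# consistency checks pass.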
| 55
| 1
|
from collections import namedtuple
import requests
from lxml import html # type: ignore
_SCREAMING_SNAKE_CASE : Union[str, Any] = namedtuple('covid_data', 'cases deaths recovered')
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : str = "https://www.worldometers.info/coronavirus/" ) -> covid_data:
"""simple docstring"""
A__ : str = '''//div[@class = "maincounter-number"]/span/text()'''
return covid_data(*html.fromstring(requests.get(__UpperCamelCase ).content ).xpath(__UpperCamelCase ) )
_SCREAMING_SNAKE_CASE : Tuple = 'Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}'
print(fmt.format(*covid_stats()))
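# The XPath grabs the three "maincounter-number" spans in page order, which is
# what lines the values up with the cases/deaths/recovered fields of the
# namedtuple.  This is screen scraping, so the ordering and class name are
# assumptions about the live page and may break if the markup changes.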
| 55
|
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=1024 , UpperCamelCase__=1024 , UpperCamelCase__=3.6 ):
A__ : str = tokenizer
A__ : int = tokenizer.bos_token_id
A__ : List[Any] = dataset
A__ : Tuple = seq_length
A__ : Any = seq_length * chars_per_token * num_of_sequences
def __iter__( self ):
A__ : Dict = iter(self.dataset )
A__ : Tuple = True
while more_examples:
A__ , A__ : Optional[Any] = [], 0
while True:
if buffer_len >= self.input_characters:
break
try:
buffer.append(next(UpperCamelCase__ )['''content'''] )
buffer_len += len(buffer[-1] )
except StopIteration:
A__ : Dict = False
break
A__ : str = tokenizer(UpperCamelCase__ , truncation=UpperCamelCase__ )['''input_ids''']
A__ : Optional[int] = []
for tokenized_input in tokenized_inputs:
all_token_ids.extend(tokenized_input + [self.concat_token_id] )
for i in range(0 , len(UpperCamelCase__ ) , self.seq_length ):
A__ : Optional[int] = all_token_ids[i : i + self.seq_length]
if len(UpperCamelCase__ ) == self.seq_length:
yield torch.tensor(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] ) -> Any:
"""simple docstring"""
A__ : Any = {'''streaming''': True}
A__ : List[str] = load_dataset(args.dataset_name , split='''train''' , **__UpperCamelCase )
A__ : List[str] = ConstantLengthDataset(__UpperCamelCase , __UpperCamelCase , seq_length=args.seq_length )
A__ : int = DataLoader(__UpperCamelCase , batch_size=args.batch_size )
return eval_dataloader
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[str] ) -> Dict:
"""simple docstring"""
model.eval()
A__ : Dict = []
for step, batch in enumerate(__UpperCamelCase ):
with torch.no_grad():
A__ : Any = model(__UpperCamelCase , labels=__UpperCamelCase )
A__ : Tuple = outputs.loss.repeat(args.batch_size )
losses.append(accelerator.gather(__UpperCamelCase ) )
if args.max_eval_steps > 0 and step >= args.max_eval_steps:
break
A__ : Tuple = torch.mean(torch.cat(__UpperCamelCase ) )
try:
A__ : Optional[Any] = torch.exp(__UpperCamelCase )
except OverflowError:
A__ : Union[str, Any] = float('''inf''' )
return loss.item(), perplexity.item()
# Setup Accelerator
_SCREAMING_SNAKE_CASE : List[Any] = Accelerator()
# Parse configuration
_SCREAMING_SNAKE_CASE : Optional[int] = HfArgumentParser(EvaluationArguments)
_SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args()
set_seed(args.seed)
# Logging
_SCREAMING_SNAKE_CASE : Dict = logging.getLogger(__name__)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
# Load model and tokenizer
_SCREAMING_SNAKE_CASE : Optional[int] = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
_SCREAMING_SNAKE_CASE : List[str] = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
_SCREAMING_SNAKE_CASE : Optional[Any] = create_dataloader(args)
# Prepare everything with our `accelerator`.
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[Any] = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info('Evaluating and saving model after training')
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[int] = evaluate(args)
logger.info(f"""loss/eval: {eval_loss}, perplexity: {perplexity}""")
| 55
| 1
|
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Namespace ) -> List[str]:
"""simple docstring"""
return ConvertCommand(
args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
_SCREAMING_SNAKE_CASE : List[str] = '\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n'
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
@staticmethod
def __snake_case ( UpperCamelCase__ ):
A__ : Dict = parser.add_parser(
'''convert''' , help='''CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.''' , )
train_parser.add_argument('''--model_type''' , type=UpperCamelCase__ , required=UpperCamelCase__ , help='''Model\'s type.''' )
train_parser.add_argument(
'''--tf_checkpoint''' , type=UpperCamelCase__ , required=UpperCamelCase__ , help='''TensorFlow checkpoint path or folder.''' )
train_parser.add_argument(
'''--pytorch_dump_output''' , type=UpperCamelCase__ , required=UpperCamelCase__ , help='''Path to the PyTorch saved model output.''' )
train_parser.add_argument('''--config''' , type=UpperCamelCase__ , default='''''' , help='''Configuration file path or folder.''' )
train_parser.add_argument(
'''--finetuning_task_name''' , type=UpperCamelCase__ , default=UpperCamelCase__ , help='''Optional fine-tuning task name if the TF model was a finetuned model.''' , )
train_parser.set_defaults(func=UpperCamelCase__ )
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , *UpperCamelCase__ , ):
A__ : List[Any] = logging.get_logger('''transformers-cli/converting''' )
self._logger.info(F"Loading model {model_type}" )
A__ : Tuple = model_type
A__ : List[str] = tf_checkpoint
A__ : List[Any] = pytorch_dump_output
A__ : Dict = config
A__ : Any = finetuning_task_name
def __snake_case ( self ):
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(UpperCamelCase__ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(UpperCamelCase__ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(UpperCamelCase__ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(UpperCamelCase__ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(UpperCamelCase__ )
if "ckpt" in self._tf_checkpoint.lower():
A__ : Tuple = self._tf_checkpoint
A__ : Optional[Any] = ''''''
else:
A__ : List[str] = self._tf_checkpoint
A__ : Tuple = ''''''
convert_transfo_xl_checkpoint_to_pytorch(
UpperCamelCase__ , self._config , self._pytorch_dump_output , UpperCamelCase__ )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(UpperCamelCase__ )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(UpperCamelCase__ )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
'''--model_type should be selected in the list [albert, bert, funnel, gpt, gpt2, lxmert, rembert, t5, transfo_xl, xlm, xlnet]''' )
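# Example invocation of this subcommand (all paths are hypothetical):
#
#   transformers-cli convert --model_type bert \
#       --tf_checkpoint ./bert_model.ckpt \
#       --config ./bert_config.json \
#       --pytorch_dump_output ./pytorch_model.bin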
| 55
|
def SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
"""simple docstring"""
A__ : Optional[Any] = 0
for i in range(1 , 10_01 ):
total += i**i
return str(__UpperCamelCase )[-10:]
if __name__ == "__main__":
print(solution())
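# A sketch of the same computation with bounded intermediates: only the last ten
# digits matter, so each term can be reduced with three-argument pow:
#
#   total = sum(pow(i, i, 10**10) for i in range(1, 1001)) % 10**10
#   print(str(total).zfill(10))
#
# zfill guards the case where the ten-digit tail starts with zeros, which str()
# of the reduced sum would drop (the original big-integer slice keeps them).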
| 55
| 1
|
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Tuple = [
('bert.bert', 'visual_bert'),
('bert.cls', 'cls'),
('bert.classifier', 'cls'),
('token_type_embeddings_visual', 'visual_token_type_embeddings'),
('position_embeddings_visual', 'visual_position_embeddings'),
('projection', 'visual_projection'),
]
_SCREAMING_SNAKE_CASE : Any = [
'nlvr2_coco_pre_trained.th',
'nlvr2_fine_tuned.th',
'nlvr2_pre_trained.th',
'vcr_coco_pre_train.th',
'vcr_fine_tune.th',
'vcr_pre_train.th',
'vqa_coco_pre_trained.th',
'vqa_fine_tuned.th',
'vqa_pre_trained.th',
]
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
A__ : int = torch.load(__UpperCamelCase , map_location='''cpu''' )
return sd
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Tuple , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Tuple=rename_keys_prefix ) -> int:
"""simple docstring"""
A__ : Dict = OrderedDict()
A__ : Union[str, Any] = torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
A__ : Optional[int] = key
for name_pair in rename_keys_prefix:
A__ : Dict = new_key.replace(name_pair[0] , name_pair[1] )
A__ : Union[str, Any] = d[key]
if key == "bert.cls.predictions.decoder.weight":
# Old bert code didn't have `decoder.bias`, but was added separately
A__ : str = new_d['''cls.predictions.bias''']
return new_d
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int , __UpperCamelCase : List[str] ) -> str:
"""simple docstring"""
assert (
checkpoint_path.split('''/''' )[-1] in ACCEPTABLE_CHECKPOINTS
), F"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."
# Get Config
if "pre" in checkpoint_path:
A__ : str = '''pretraining'''
if "vcr" in checkpoint_path:
A__ : int = {'''visual_embedding_dim''': 5_12}
elif "vqa_advanced" in checkpoint_path:
A__ : List[str] = {'''visual_embedding_dim''': 20_48}
elif "vqa" in checkpoint_path:
A__ : Optional[Any] = {'''visual_embedding_dim''': 20_48}
elif "nlvr" in checkpoint_path:
A__ : Any = {'''visual_embedding_dim''': 10_24}
else:
raise NotImplementedError(F"No implementation found for `{checkpoint_path}`." )
else:
if "vcr" in checkpoint_path:
A__ : int = {'''visual_embedding_dim''': 5_12}
A__ : str = '''multichoice'''
elif "vqa_advanced" in checkpoint_path:
A__ : Union[str, Any] = {'''visual_embedding_dim''': 20_48}
A__ : List[Any] = '''vqa_advanced'''
elif "vqa" in checkpoint_path:
A__ : Tuple = {'''visual_embedding_dim''': 20_48, '''num_labels''': 31_29}
A__ : Optional[int] = '''vqa'''
elif "nlvr" in checkpoint_path:
A__ : Dict = {
'''visual_embedding_dim''': 10_24,
'''num_labels''': 2,
}
A__ : Optional[int] = '''nlvr'''
A__ : str = VisualBertConfig(**__UpperCamelCase )
# Load State Dict
A__ : int = load_state_dict(__UpperCamelCase )
A__ : Optional[Any] = get_new_dict(__UpperCamelCase , __UpperCamelCase )
if model_type == "pretraining":
A__ : Optional[int] = VisualBertForPreTraining(__UpperCamelCase )
elif model_type == "vqa":
A__ : Union[str, Any] = VisualBertForQuestionAnswering(__UpperCamelCase )
elif model_type == "nlvr":
A__ : Tuple = VisualBertForVisualReasoning(__UpperCamelCase )
elif model_type == "multichoice":
A__ : List[str] = VisualBertForMultipleChoice(__UpperCamelCase )
model.load_state_dict(__UpperCamelCase )
# Save Checkpoints
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.')
_SCREAMING_SNAKE_CASE : str = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
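# Example invocation (the checkpoint filename must be one of ACCEPTABLE_CHECKPOINTS;
# the output folder is hypothetical):
#
#   python convert_visual_bert_original_pytorch_checkpoint_to_pytorch.py \
#       vqa_pre_trained.th ./visualbert-vqa-pre
#
# "pre" in the name routes this through the pretraining branch with
# visual_embedding_dim = 2048.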
| 55
|
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self ):
A__ : Dict = inspect.getfile(accelerate.test_utils )
A__ : Any = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps''', '''test_metrics.py'''] )
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
A__ : Tuple = test_metrics
@require_cpu
def __snake_case ( self ):
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
def __snake_case ( self ):
debug_launcher(self.test_metrics.main )
@require_single_gpu
def __snake_case ( self ):
self.test_metrics.main()
@require_multi_gpu
def __snake_case ( self ):
print(F"Found {torch.cuda.device_count()} devices." )
A__ : int = ['''torchrun''', F"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(UpperCamelCase__ , env=os.environ.copy() )
| 55
| 1
|
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_, unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = LayoutLMTokenizer
_lowerCAmelCase = LayoutLMTokenizerFast
_lowerCAmelCase = True
_lowerCAmelCase = True
def __snake_case ( self ):
super().setUp()
A__ : Dict = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
A__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __snake_case ( self , **UpperCamelCase__ ):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def __snake_case ( self , UpperCamelCase__ ):
A__ : List[str] = '''UNwant\u00E9d,running'''
A__ : Optional[int] = '''unwanted, running'''
return input_text, output_text
def __snake_case ( self ):
A__ : List[str] = self.tokenizer_class(self.vocab_file )
A__ : Any = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(UpperCamelCase__ , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [7, 4, 5, 10, 8, 9] )
def __snake_case ( self ):
pass
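# How the id assertion above works: the toy vocab assigns ids by position, so
# "un" -> 7, "##want" -> 4, "##ed" -> 5, "," -> 10, "runn" -> 8, "##ing" -> 9,
# which is exactly the expected [7, 4, 5, 10, 8, 9].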
| 55
|
from numpy import exp, pi, sqrt
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Dict , __UpperCamelCase : float = 0.0 , __UpperCamelCase : float = 1.0 ) -> int:
"""simple docstring"""
return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
if __name__ == "__main__":
import doctest
doctest.testmod()
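# Sanity check: at x = mu the density equals 1 / sqrt(2 * pi * sigma**2), so the
# standard normal (mu = 0, sigma = 1) peaks at roughly 0.3989:
#
#   assert abs(1 / sqrt(2 * pi) - 0.3989) < 1e-4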
| 55
| 1
|
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__=13 , UpperCamelCase__=2 , UpperCamelCase__=24 , UpperCamelCase__=16 , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=32 , UpperCamelCase__=5 , UpperCamelCase__=4 , UpperCamelCase__=37 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=10 , UpperCamelCase__=0.0_2 , UpperCamelCase__=None , UpperCamelCase__=2 , UpperCamelCase__=2 , ):
A__ : Dict = parent
A__ : Tuple = batch_size
A__ : int = patch_size
A__ : int = max_length
A__ : Tuple = num_mel_bins
A__ : List[Any] = is_training
A__ : List[Any] = use_labels
A__ : Any = hidden_size
A__ : Optional[int] = num_hidden_layers
A__ : List[Any] = num_attention_heads
A__ : Optional[Any] = intermediate_size
A__ : List[str] = hidden_act
A__ : Dict = hidden_dropout_prob
A__ : Optional[Any] = attention_probs_dropout_prob
A__ : Dict = type_sequence_label_size
A__ : List[Any] = initializer_range
A__ : List[str] = scope
A__ : List[Any] = frequency_stride
A__ : str = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
A__ : str = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
A__ : int = (self.max_length - self.patch_size) // self.time_stride + 1
A__ : Any = frequency_out_dimension * time_out_dimension
A__ : Dict = num_patches + 2
def __snake_case ( self ):
A__ : str = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
A__ : Tuple = None
if self.use_labels:
A__ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ : List[Any] = self.get_config()
return config, input_values, labels
def __snake_case ( self ):
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : Optional[Any] = ASTModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A__ : Union[str, Any] = model(UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __snake_case ( self ):
A__ : Any = self.prepare_config_and_inputs()
A__ , A__ , A__ : str = config_and_inputs
A__ : Optional[int] = {'''input_values''': input_values}
return config, inputs_dict
@require_torch
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
_lowerCAmelCase = (
{"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
if is_torch_available()
else {}
)
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def __snake_case ( self ):
A__ : Union[str, Any] = ASTModelTester(self )
A__ : Dict = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 )
def __snake_case ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''AST does not use inputs_embeds''' )
def __snake_case ( self ):
pass
def __snake_case ( self ):
A__ , A__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : Tuple = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A__ : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) )
def __snake_case ( self ):
A__ , A__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : int = model_class(UpperCamelCase__ )
A__ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ : Dict = [*signature.parameters.keys()]
A__ : List[Any] = ['''input_values''']
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def __snake_case ( self ):
A__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
@slow
def __snake_case ( self ):
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : int = ASTModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
"""simple docstring"""
A__ : Tuple = hf_hub_download(
repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' )
A__ , A__ : List[Any] = torchaudio.load(__UpperCamelCase )
return audio, sampling_rate
@require_torch
@require_torchaudio
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __snake_case ( self ):
return (
ASTFeatureExtractor.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' )
if is_torchaudio_available()
else None
)
@slow
def __snake_case ( self ):
A__ : Union[str, Any] = self.default_feature_extractor
A__ : Optional[Any] = ASTForAudioClassification.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' ).to(UpperCamelCase__ )
A__ : str = self.default_feature_extractor
A__ , A__ : Optional[int] = prepare_audio()
A__ : Optional[Any] = audio.squeeze().numpy()
A__ : Any = feature_extractor(UpperCamelCase__ , sampling_rate=UpperCamelCase__ , return_tensors='''pt''' ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
A__ : Optional[Any] = model(**UpperCamelCase__ )
# verify the logits
A__ : Optional[int] = torch.Size((1, 527) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
A__ : str = torch.tensor([-0.8_7_6_0, -7.0_0_4_2, -8.6_6_0_2] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) )
| 55
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE : int = {
'configuration_bert': ['BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BertConfig', 'BertOnnxConfig'],
'tokenization_bert': ['BasicTokenizer', 'BertTokenizer', 'WordpieceTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Optional[Any] = ['BertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Union[str, Any] = [
'BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BertForMaskedLM',
'BertForMultipleChoice',
'BertForNextSentencePrediction',
'BertForPreTraining',
'BertForQuestionAnswering',
'BertForSequenceClassification',
'BertForTokenClassification',
'BertLayer',
'BertLMHeadModel',
'BertModel',
'BertPreTrainedModel',
'load_tf_weights_in_bert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Tuple = [
'TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBertEmbeddings',
'TFBertForMaskedLM',
'TFBertForMultipleChoice',
'TFBertForNextSentencePrediction',
'TFBertForPreTraining',
'TFBertForQuestionAnswering',
'TFBertForSequenceClassification',
'TFBertForTokenClassification',
'TFBertLMHeadModel',
'TFBertMainLayer',
'TFBertModel',
'TFBertPreTrainedModel',
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Dict = ['TFBertTokenizer']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Optional[int] = [
'FlaxBertForCausalLM',
'FlaxBertForMaskedLM',
'FlaxBertForMultipleChoice',
'FlaxBertForNextSentencePrediction',
'FlaxBertForPreTraining',
'FlaxBertForQuestionAnswering',
'FlaxBertForSequenceClassification',
'FlaxBertForTokenClassification',
'FlaxBertModel',
'FlaxBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 55
| 1
|
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
_SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Optional[Any] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
_SCREAMING_SNAKE_CASE : Dict = {
'vocab_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/vocab.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/vocab.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/vocab.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json',
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'
),
},
'merges_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/merges.txt',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/merges.txt',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/merges.txt',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt',
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'
),
},
'tokenizer_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/tokenizer.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/tokenizer.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json',
'roberta-base-openai-detector': (
'https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'
),
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'
),
},
}
_SCREAMING_SNAKE_CASE : Any = {
'roberta-base': 5_1_2,
'roberta-large': 5_1_2,
'roberta-large-mnli': 5_1_2,
'distilroberta-base': 5_1_2,
'roberta-base-openai-detector': 5_1_2,
'roberta-large-openai-detector': 5_1_2,
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = VOCAB_FILES_NAMES
_lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase = ["input_ids", "attention_mask"]
_lowerCAmelCase = RobertaTokenizer
def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__="replace" , UpperCamelCase__="<s>" , UpperCamelCase__="</s>" , UpperCamelCase__="</s>" , UpperCamelCase__="<s>" , UpperCamelCase__="<unk>" , UpperCamelCase__="<pad>" , UpperCamelCase__="<mask>" , UpperCamelCase__=False , UpperCamelCase__=True , **UpperCamelCase__ , ):
super().__init__(
UpperCamelCase__ , UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , errors=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__ , **UpperCamelCase__ , )
A__ : Any = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , UpperCamelCase__ ) != add_prefix_space:
A__ : int = getattr(UpperCamelCase__ , pre_tok_state.pop('''type''' ) )
A__ : str = add_prefix_space
A__ : int = pre_tok_class(**UpperCamelCase__ )
A__ : Any = add_prefix_space
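# Also sync the serialized post_processor component with the add_prefix_space and trim_offsets arguments.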
A__ : Dict = '''post_processor'''
A__ : str = getattr(self.backend_tokenizer , UpperCamelCase__ , UpperCamelCase__ )
if tokenizer_component_instance:
A__ : Optional[Any] = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
A__ : Tuple = tuple(state['''sep'''] )
if "cls" in state:
A__ : Tuple = tuple(state['''cls'''] )
A__ : Any = False
if state.get('''add_prefix_space''' , UpperCamelCase__ ) != add_prefix_space:
A__ : Dict = add_prefix_space
A__ : Optional[Any] = True
if state.get('''trim_offsets''' , UpperCamelCase__ ) != trim_offsets:
A__ : Tuple = trim_offsets
A__ : Union[str, Any] = True
if changes_to_apply:
A__ : Optional[Any] = getattr(UpperCamelCase__ , state.pop('''type''' ) )
A__ : Union[str, Any] = component_class(**UpperCamelCase__ )
setattr(self.backend_tokenizer , UpperCamelCase__ , UpperCamelCase__ )
@property
def __snake_case ( self ):
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def __snake_case ( self , UpperCamelCase__ ):
A__ : str = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else value
A__ : List[str] = value
def __snake_case ( self , *UpperCamelCase__ , **UpperCamelCase__ ):
A__ : List[str] = kwargs.get('''is_split_into_words''' , UpperCamelCase__ )
assert self.add_prefix_space or not is_split_into_words, (
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*UpperCamelCase__ , **UpperCamelCase__ )
def __snake_case ( self , *UpperCamelCase__ , **UpperCamelCase__ ):
A__ : Tuple = kwargs.get('''is_split_into_words''' , UpperCamelCase__ )
assert self.add_prefix_space or not is_split_into_words, (
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*UpperCamelCase__ , **UpperCamelCase__ )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ = None ):
A__ : Union[str, Any] = self._tokenizer.model.save(UpperCamelCase__ , name=UpperCamelCase__ )
return tuple(UpperCamelCase__ )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__=None ):
A__ : Optional[int] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ = None ):
A__ : List[str] = [self.sep_token_id]
A__ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 55
|
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
_SCREAMING_SNAKE_CASE : List[Any] = get_tests_dir('fixtures/dummy_feature_extractor_config.json')
_SCREAMING_SNAKE_CASE : int = get_tests_dir('fixtures/vocab.json')
_SCREAMING_SNAKE_CASE : Tuple = get_tests_dir('fixtures')
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
def __snake_case ( self ):
A__ : List[Any] = 0
def __snake_case ( self ):
A__ : Dict = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Optional[Any] = WavaVecaConfig()
A__ : Dict = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
# save in new folder
model_config.save_pretrained(UpperCamelCase__ )
processor.save_pretrained(UpperCamelCase__ )
A__ : Any = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(UpperCamelCase__ , os.path.join(UpperCamelCase__ , UpperCamelCase__ ) )
copyfile(UpperCamelCase__ , os.path.join(UpperCamelCase__ , '''vocab.json''' ) )
A__ : List[Any] = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Dict = WavaVecaFeatureExtractor()
A__ : Union[str, Any] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
A__ : Optional[int] = WavaVecaProcessor(UpperCamelCase__ , UpperCamelCase__ )
# save in new folder
processor.save_pretrained(UpperCamelCase__ )
# drop `processor_class` in tokenizer
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , '''r''' ) as f:
A__ : str = json.load(UpperCamelCase__ )
config_dict.pop('''processor_class''' )
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , '''w''' ) as f:
f.write(json.dumps(UpperCamelCase__ ) )
A__ : Optional[int] = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Optional[int] = WavaVecaFeatureExtractor()
A__ : List[Any] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
A__ : str = WavaVecaProcessor(UpperCamelCase__ , UpperCamelCase__ )
# save in new folder
processor.save_pretrained(UpperCamelCase__ )
# drop `processor_class` in feature extractor
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , '''r''' ) as f:
A__ : List[Any] = json.load(UpperCamelCase__ )
config_dict.pop('''processor_class''' )
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , '''w''' ) as f:
f.write(json.dumps(UpperCamelCase__ ) )
A__ : List[Any] = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Any = WavaVecaConfig(processor_class='''Wav2Vec2Processor''' )
model_config.save_pretrained(UpperCamelCase__ )
# copy relevant files
copyfile(UpperCamelCase__ , os.path.join(UpperCamelCase__ , '''vocab.json''' ) )
# create empty sample processor
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , '''w''' ) as f:
f.write('''{}''' )
A__ : Union[str, Any] = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(UpperCamelCase__ ):
A__ : Union[str, Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCamelCase__ ):
A__ : str = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__ )
A__ : int = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__ )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
A__ : List[Any] = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
A__ : List[Any] = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
A__ : Dict = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__ , use_fast=UpperCamelCase__ )
A__ : int = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def __snake_case ( self ):
try:
AutoConfig.register('''custom''' , UpperCamelCase__ )
AutoFeatureExtractor.register(UpperCamelCase__ , UpperCamelCase__ )
AutoTokenizer.register(UpperCamelCase__ , slow_tokenizer_class=UpperCamelCase__ )
AutoProcessor.register(UpperCamelCase__ , UpperCamelCase__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCamelCase__ ):
AutoProcessor.register(UpperCamelCase__ , UpperCamelCase__ )
# Now that the config is registered, it can be used as any other config with the auto-API
A__ : Any = CustomFeatureExtractor.from_pretrained(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
A__ : str = os.path.join(UpperCamelCase__ , '''vocab.txt''' )
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
A__ : str = CustomTokenizer(UpperCamelCase__ )
A__ : Optional[Any] = CustomProcessor(UpperCamelCase__ , UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(UpperCamelCase__ )
A__ : Union[str, Any] = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def __snake_case ( self ):
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = False
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = False
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = "AutoFeatureExtractor"
_lowerCAmelCase = "AutoTokenizer"
_lowerCAmelCase = False
try:
AutoConfig.register('''custom''' , UpperCamelCase__ )
AutoFeatureExtractor.register(UpperCamelCase__ , UpperCamelCase__ )
AutoTokenizer.register(UpperCamelCase__ , slow_tokenizer_class=UpperCamelCase__ )
AutoProcessor.register(UpperCamelCase__ , UpperCamelCase__ )
# If remote code is not set, the default is to use local classes.
A__ : List[Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
A__ : Any = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__ )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
A__ : Union[str, Any] = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__ )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def __snake_case ( self ):
A__ : str = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(processor.__class__.__name__ , '''BertTokenizerFast''' )
def __snake_case ( self ):
A__ : Union[str, Any] = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-convnext''' )
self.assertEqual(processor.__class__.__name__ , '''ConvNextImageProcessor''' )
@is_staging_test
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def __snake_case ( cls ):
A__ : List[str] = TOKEN
HfFolder.save_token(UpperCamelCase__ )
@classmethod
def __snake_case ( cls ):
try:
delete_repo(token=cls._token , repo_id='''test-processor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-processor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-processor''' )
except HTTPError:
pass
def __snake_case ( self ):
A__ : Optional[Any] = WavaVecaProcessor.from_pretrained(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(UpperCamelCase__ , '''test-processor''' ) , push_to_hub=UpperCamelCase__ , use_auth_token=self._token )
A__ : List[Any] = WavaVecaProcessor.from_pretrained(F"{USER}/test-processor" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(UpperCamelCase__ , getattr(new_processor.feature_extractor , UpperCamelCase__ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def __snake_case ( self ):
A__ : int = WavaVecaProcessor.from_pretrained(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(UpperCamelCase__ , '''test-processor-org''' ) , push_to_hub=UpperCamelCase__ , use_auth_token=self._token , organization='''valid_org''' , )
A__ : List[str] = WavaVecaProcessor.from_pretrained('''valid_org/test-processor-org''' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(UpperCamelCase__ , getattr(new_processor.feature_extractor , UpperCamelCase__ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def __snake_case ( self ):
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
A__ : Optional[Any] = CustomFeatureExtractor.from_pretrained(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
A__ : List[Any] = os.path.join(UpperCamelCase__ , '''vocab.txt''' )
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
A__ : Union[str, Any] = CustomTokenizer(UpperCamelCase__ )
A__ : List[Any] = CustomProcessor(UpperCamelCase__ , UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(F"{USER}/test-dynamic-processor" , token=self._token )
A__ : Union[str, Any] = Repository(UpperCamelCase__ , clone_from=F"{USER}/test-dynamic-processor" , token=self._token )
processor.save_pretrained(UpperCamelCase__ )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor''',
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(UpperCamelCase__ , '''tokenizer_config.json''' ) ) as f:
A__ : Optional[int] = json.load(UpperCamelCase__ )
self.assertDictEqual(
tokenizer_config['''auto_map'''] , {
'''AutoTokenizer''': ['''custom_tokenization.CustomTokenizer''', None],
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(UpperCamelCase__ , '''custom_feature_extraction.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(UpperCamelCase__ , '''custom_tokenization.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(UpperCamelCase__ , '''custom_processing.py''' ) ) )
repo.push_to_hub()
A__ : Tuple = AutoProcessor.from_pretrained(F"{USER}/test-dynamic-processor" , trust_remote_code=UpperCamelCase__ )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , '''CustomProcessor''' )
| 55
| 1
|
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Any = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
_SCREAMING_SNAKE_CASE : Optional[Any] = {
'vocab_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
},
'merges_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
},
}
_SCREAMING_SNAKE_CASE : Optional[Any] = {
'allenai/longformer-base-4096': 4_0_9_6,
'allenai/longformer-large-4096': 4_0_9_6,
'allenai/longformer-large-4096-finetuned-triviaqa': 4_0_9_6,
'allenai/longformer-base-4096-extra.pos.embd.only': 4_0_9_6,
'allenai/longformer-large-4096-extra.pos.embd.only': 4_0_9_6,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
"""simple docstring"""
A__ : Union[str, Any] = (
list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
)
A__ : str = bs[:]
A__ : Tuple = 0
for b in range(2**8 ):
if b not in bs:
bs.append(__UpperCamelCase )
cs.append(2**8 + n )
n += 1
A__ : List[Any] = [chr(__UpperCamelCase ) for n in cs]
return dict(zip(__UpperCamelCase , __UpperCamelCase ) )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Dict ) -> Any:
"""simple docstring"""
A__ : Any = set()
A__ : Any = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
A__ : int = char
return pairs
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = VOCAB_FILES_NAMES
_lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase = ["input_ids", "attention_mask"]
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__="replace" , UpperCamelCase__="<s>" , UpperCamelCase__="</s>" , UpperCamelCase__="</s>" , UpperCamelCase__="<s>" , UpperCamelCase__="<unk>" , UpperCamelCase__="<pad>" , UpperCamelCase__="<mask>" , UpperCamelCase__=False , **UpperCamelCase__ , ):
A__ : str = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else bos_token
A__ : Any = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else eos_token
A__ : List[str] = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else sep_token
A__ : Union[str, Any] = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else cls_token
A__ : Dict = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else unk_token
A__ : Optional[Any] = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
A__ : List[Any] = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token
super().__init__(
errors=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , **UpperCamelCase__ , )
with open(UpperCamelCase__ , encoding='''utf-8''' ) as vocab_handle:
A__ : Optional[int] = json.load(UpperCamelCase__ )
A__ : Optional[Any] = {v: k for k, v in self.encoder.items()}
A__ : int = errors # how to handle errors in decoding
A__ : List[Any] = bytes_to_unicode()
A__ : List[Any] = {v: k for k, v in self.byte_encoder.items()}
with open(UpperCamelCase__ , encoding='''utf-8''' ) as merges_handle:
A__ : Union[str, Any] = merges_handle.read().split('''\n''' )[1:-1]
A__ : Tuple = [tuple(merge.split() ) for merge in bpe_merges]
A__ : Optional[int] = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
A__ : List[Any] = {}
A__ : List[Any] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
A__ : Dict = re.compile(r'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
def __snake_case ( self ):
return len(self.encoder )
def __snake_case ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def __snake_case ( self , UpperCamelCase__ ):
if token in self.cache:
return self.cache[token]
A__ : List[str] = tuple(UpperCamelCase__ )
A__ : Union[str, Any] = get_pairs(UpperCamelCase__ )
if not pairs:
return token
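# Greedily merge the lowest-ranked (earliest-learned) adjacent pair until no learned merge applies.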
while True:
A__ : List[Any] = min(UpperCamelCase__ , key=lambda UpperCamelCase__ : self.bpe_ranks.get(UpperCamelCase__ , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
A__ , A__ : str = bigram
A__ : str = []
A__ : List[str] = 0
while i < len(UpperCamelCase__ ):
try:
A__ : List[str] = word.index(UpperCamelCase__ , UpperCamelCase__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
A__ : Optional[int] = j
if word[i] == first and i < len(UpperCamelCase__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
A__ : List[str] = tuple(UpperCamelCase__ )
A__ : Union[str, Any] = new_word
if len(UpperCamelCase__ ) == 1:
break
else:
A__ : Union[str, Any] = get_pairs(UpperCamelCase__ )
A__ : Optional[int] = ''' '''.join(UpperCamelCase__ )
A__ : Any = word
return word
def __snake_case ( self , UpperCamelCase__ ):
A__ : str = []
for token in re.findall(self.pat , UpperCamelCase__ ):
A__ : int = ''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCamelCase__ ).split(''' ''' ) )
return bpe_tokens
def __snake_case ( self , UpperCamelCase__ ):
return self.encoder.get(UpperCamelCase__ , self.encoder.get(self.unk_token ) )
def __snake_case ( self , UpperCamelCase__ ):
return self.decoder.get(UpperCamelCase__ )
def __snake_case ( self , UpperCamelCase__ ):
A__ : str = ''''''.join(UpperCamelCase__ )
A__ : int = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ = None ):
if not os.path.isdir(UpperCamelCase__ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
A__ : str = os.path.join(
UpperCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
A__ : Tuple = os.path.join(
UpperCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCamelCase__ , ensure_ascii=UpperCamelCase__ ) + '''\n''' )
A__ : Tuple = 0
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCamelCase__ : UpperCamelCase__[1] ):
if index != token_index:
logger.warning(
F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
''' Please check that the tokenizer is not corrupted!''' )
A__ : Optional[int] = token_index
writer.write(''' '''.join(UpperCamelCase__ ) + '''\n''' )
index += 1
return vocab_file, merge_file
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
A__ : List[str] = [self.cls_token_id]
A__ : List[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase__ , token_ids_a=UpperCamelCase__ , already_has_special_tokens=UpperCamelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCamelCase__ )) + [1]
return [1] + ([0] * len(UpperCamelCase__ )) + [1, 1] + ([0] * len(UpperCamelCase__ )) + [1]
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ = None ):
A__ : Any = [self.sep_token_id]
A__ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__=False , **UpperCamelCase__ ):
A__ : int = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(UpperCamelCase__ ) > 0 and not text[0].isspace()):
A__ : List[Any] = ''' ''' + text
return (text, kwargs)
| 55
|
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
@staticmethod
@abstractmethod
def __snake_case ( UpperCamelCase__ ):
raise NotImplementedError()
@abstractmethod
def __snake_case ( self ):
raise NotImplementedError()
| 55
| 1
|
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UpperCamelCase__ :
'''simple docstring'''
@property
def __snake_case ( self ):
return self.get_dummy_input()
@property
def __snake_case ( self ):
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(F"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'." )
def __snake_case ( self , UpperCamelCase__=True , UpperCamelCase__=False , UpperCamelCase__=False , UpperCamelCase__=False , ):
A__ : Optional[int] = 4
A__ : List[Any] = 32
A__ : Union[str, Any] = (32, 32)
A__ : int = torch.manual_seed(0 )
A__ : str = torch.device(UpperCamelCase__ )
A__ : Optional[Any] = (batch_size, num_channels) + sizes
A__ : Tuple = randn_tensor(UpperCamelCase__ , generator=UpperCamelCase__ , device=UpperCamelCase__ )
A__ : Dict = {'''hidden_states''': hidden_states}
if include_temb:
A__ : Optional[Any] = 128
A__ : str = randn_tensor((batch_size, temb_channels) , generator=UpperCamelCase__ , device=UpperCamelCase__ )
if include_res_hidden_states_tuple:
A__ : Dict = torch.manual_seed(1 )
A__ : Any = (randn_tensor(UpperCamelCase__ , generator=UpperCamelCase__ , device=UpperCamelCase__ ),)
if include_encoder_hidden_states:
A__ : str = floats_tensor((batch_size, 32, 32) ).to(UpperCamelCase__ )
if include_skip_sample:
A__ : Optional[Any] = randn_tensor(((batch_size, 3) + sizes) , generator=UpperCamelCase__ , device=UpperCamelCase__ )
return dummy_input
def __snake_case ( self ):
A__ : Union[str, Any] = {
'''in_channels''': 32,
'''out_channels''': 32,
'''temb_channels''': 128,
}
if self.block_type == "up":
A__ : Tuple = 32
if self.block_type == "mid":
init_dict.pop('''out_channels''' )
A__ : List[Any] = self.dummy_input
return init_dict, inputs_dict
def __snake_case ( self , UpperCamelCase__ ):
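# Build the block from the common init args, run a forward pass in eval mode, and compare an output slice against the expected values passed in.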
A__ , A__ : List[Any] = self.prepare_init_args_and_inputs_for_common()
A__ : Any = self.block_class(**UpperCamelCase__ )
unet_block.to(UpperCamelCase__ )
unet_block.eval()
with torch.no_grad():
A__ : int = unet_block(**UpperCamelCase__ )
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A__ : List[str] = output[0]
self.assertEqual(output.shape , self.output_shape )
A__ : List[str] = output[0, -1, -3:, -3:]
A__ : Dict = torch.tensor(UpperCamelCase__ ).to(UpperCamelCase__ )
assert torch_all_close(output_slice.flatten() , UpperCamelCase__ , atol=5e-3 )
@unittest.skipIf(torch_device == '''mps''' , '''Training is not supported in mps''' )
def __snake_case ( self ):
A__ , A__ : List[str] = self.prepare_init_args_and_inputs_for_common()
A__ : Any = self.block_class(**UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.train()
A__ : Any = model(**UpperCamelCase__ )
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A__ : Dict = output[0]
A__ : Union[str, Any] = torch.device(UpperCamelCase__ )
A__ : str = randn_tensor(output.shape , device=UpperCamelCase__ )
A__ : List[str] = torch.nn.functional.mse_loss(UpperCamelCase__ , UpperCamelCase__ )
loss.backward()
| 55
|
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__=13 , UpperCamelCase__=[30, 30] , UpperCamelCase__=2 , UpperCamelCase__=3 , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=32 , UpperCamelCase__=5 , UpperCamelCase__=4 , UpperCamelCase__=37 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=10 , UpperCamelCase__=0.0_2 , UpperCamelCase__=3 , UpperCamelCase__=None , UpperCamelCase__=8 , UpperCamelCase__=10 , ):
A__ : Optional[int] = parent
A__ : List[Any] = batch_size
A__ : Dict = image_size
A__ : Any = patch_size
A__ : Dict = num_channels
A__ : List[Any] = is_training
A__ : int = use_labels
A__ : Any = hidden_size
A__ : List[str] = num_hidden_layers
A__ : Optional[int] = num_attention_heads
A__ : Optional[Any] = intermediate_size
A__ : str = hidden_act
A__ : str = hidden_dropout_prob
A__ : Optional[int] = attention_probs_dropout_prob
A__ : Optional[int] = type_sequence_label_size
A__ : Any = initializer_range
A__ : Optional[int] = num_labels
A__ : Union[str, Any] = scope
A__ : Union[str, Any] = n_targets
A__ : Dict = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
A__ : int = (image_size[1] // patch_size) * (image_size[0] // patch_size)
A__ : List[str] = num_patches + 1 + self.num_detection_tokens
def __snake_case ( self ):
A__ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
A__ : int = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
A__ : Tuple = []
for i in range(self.batch_size ):
A__ : List[Any] = {}
A__ : Tuple = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=UpperCamelCase__ )
A__ : Any = torch.rand(self.n_targets , 4 , device=UpperCamelCase__ )
labels.append(UpperCamelCase__ )
A__ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __snake_case ( self ):
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : Tuple = YolosModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A__ : Optional[Any] = model(UpperCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : Any = YolosForObjectDetection(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A__ : Union[str, Any] = model(pixel_values=UpperCamelCase__ )
A__ : Optional[int] = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
A__ : Union[str, Any] = model(pixel_values=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def __snake_case ( self ):
A__ : Optional[int] = self.prepare_config_and_inputs()
A__ , A__ , A__ : Optional[Any] = config_and_inputs
A__ : Optional[int] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
_lowerCAmelCase = (
{"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
)
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=False ):
A__ : Optional[int] = super()._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
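# YolosForObjectDetection expects one target dict per image with class labels and normalized boxes, so build dummy targets when labels are requested.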
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
A__ : str = []
for i in range(self.model_tester.batch_size ):
A__ : int = {}
A__ : Dict = torch.ones(
size=(self.model_tester.n_targets,) , device=UpperCamelCase__ , dtype=torch.long )
A__ : Dict = torch.ones(
self.model_tester.n_targets , 4 , device=UpperCamelCase__ , dtype=torch.float )
labels.append(UpperCamelCase__ )
A__ : Dict = labels
return inputs_dict
def __snake_case ( self ):
A__ : List[Any] = YolosModelTester(self )
A__ : List[str] = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 )
def __snake_case ( self ):
self.config_tester.run_common_tests()
def __snake_case ( self ):
# YOLOS does not use inputs_embeds
pass
def __snake_case ( self ):
A__ , A__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : Any = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A__ : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) )
def __snake_case ( self ):
A__ , A__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : List[str] = model_class(UpperCamelCase__ )
A__ : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ : Optional[int] = [*signature.parameters.keys()]
A__ : Optional[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def __snake_case ( self ):
A__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def __snake_case ( self ):
A__ , A__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
A__ : Tuple = True
# in YOLOS, the seq_len is different
A__ : List[Any] = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
A__ : Any = True
A__ : Optional[int] = False
A__ : Optional[Any] = True
A__ : int = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A__ : List[str] = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
A__ : Optional[int] = outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
A__ : Tuple = True
A__ : Optional[Any] = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A__ : Tuple = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
A__ : Tuple = outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
A__ : List[Any] = len(UpperCamelCase__ )
# Check attention is always last and order is fine
A__ : List[str] = True
A__ : List[Any] = True
A__ : int = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A__ : Tuple = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
A__ : Tuple = 1
self.assertEqual(out_len + added_hidden_states , len(UpperCamelCase__ ) )
A__ : List[str] = outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def __snake_case ( self ):
def check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : str = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A__ : int = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
A__ : Optional[Any] = outputs.hidden_states
A__ : int = getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
# YOLOS has a different seq_length
A__ : Union[str, Any] = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
A__ , A__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : int = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A__ : Optional[int] = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
A__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*UpperCamelCase__ )
@slow
def __snake_case ( self ):
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : Union[str, Any] = YolosModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE ( ) -> List[str]:
"""simple docstring"""
A__ : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __snake_case ( self ):
return AutoImageProcessor.from_pretrained('''hustvl/yolos-small''' ) if is_vision_available() else None
@slow
def __snake_case ( self ):
A__ : Tuple = YolosForObjectDetection.from_pretrained('''hustvl/yolos-small''' ).to(UpperCamelCase__ )
A__ : str = self.default_image_processor
A__ : Tuple = prepare_img()
A__ : Tuple = image_processor(images=UpperCamelCase__ , return_tensors='''pt''' ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
A__ : Any = model(inputs.pixel_values )
# verify outputs
A__ : List[Any] = torch.Size((1, 100, 92) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
A__ : Optional[int] = torch.tensor(
[[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] , device=UpperCamelCase__ , )
A__ : Optional[int] = torch.tensor(
[[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] , device=UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , UpperCamelCase__ , atol=1e-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , UpperCamelCase__ , atol=1e-4 ) )
# verify postprocessing
A__ : Dict = image_processor.post_process_object_detection(
UpperCamelCase__ , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
A__ : int = torch.tensor([0.9_9_9_4, 0.9_7_9_0, 0.9_9_6_4, 0.9_9_7_2, 0.9_8_6_1] ).to(UpperCamelCase__ )
A__ : str = [75, 75, 17, 63, 17]
A__ : Tuple = torch.tensor([3_3_5.0_6_0_9, 7_9.3_8_4_8, 3_7_5.4_2_1_6, 1_8_7.2_4_9_5] ).to(UpperCamelCase__ )
self.assertEqual(len(results['''scores'''] ) , 5 )
self.assertTrue(torch.allclose(results['''scores'''] , UpperCamelCase__ , atol=1e-4 ) )
self.assertSequenceEqual(results['''labels'''].tolist() , UpperCamelCase__ )
self.assertTrue(torch.allclose(results['''boxes'''][0, :] , UpperCamelCase__ ) )
| 55
| 1
|
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_SCREAMING_SNAKE_CASE : Union[str, Any] = object()
# For specifying empty leaf dict `{}`
_SCREAMING_SNAKE_CASE : Optional[Any] = object()
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : int ) -> List[str]:
"""simple docstring"""
A__ : Optional[int] = tuple((re.compile(x + '''$''' ) for x in qs) )
for i in range(len(__UpperCamelCase ) - len(__UpperCamelCase ) + 1 ):
A__ : Tuple = [x.match(__UpperCamelCase ) for x, y in zip(__UpperCamelCase , ks[i:] )]
if matches and all(__UpperCamelCase ):
return True
return False
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[int] ) -> Optional[int]:
"""simple docstring"""
def replace(__UpperCamelCase : Any , __UpperCamelCase : Tuple ):
for rule, replacement in rules:
if _match(__UpperCamelCase , __UpperCamelCase ):
return replacement
return val
return replace
def SCREAMING_SNAKE_CASE ( ) -> Dict:
"""simple docstring"""
return [
# embeddings
(("transformer", "wpe", "embedding"), P('''mp''' , __UpperCamelCase )),
(("transformer", "wte", "embedding"), P('''mp''' , __UpperCamelCase )),
        # attention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(__UpperCamelCase , '''mp''' )),
(("attention", "out_proj", "kernel"), P('''mp''' , __UpperCamelCase )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(__UpperCamelCase , '''mp''' )),
(("mlp", "c_fc", "bias"), P('''mp''' )),
(("mlp", "c_proj", "kernel"), P('''mp''' , __UpperCamelCase )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[str] ) -> Any:
"""simple docstring"""
A__ : Optional[Any] = _get_partition_rules()
A__ : Dict = _replacement_rules(__UpperCamelCase )
A__ : Optional[int] = {k: _unmatched for k in flatten_dict(__UpperCamelCase )}
A__ : Tuple = {k: replace(__UpperCamelCase , __UpperCamelCase ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(__UpperCamelCase ) )
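# Hypothetical usage sketch (the function above was presumably called
# `set_partitions` in the original source): given a nested Flax params dict,
# it returns a frozen dict mapping every parameter path to a PartitionSpec,
# and the assert fails loudly if any path is not covered by the rules:
#   param_specs = set_partitions(model_params)  # model_params: nested params dict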
| 55
|
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> int:
"""simple docstring"""
    if n == 1 or not isinstance(__UpperCamelCase , int ):
return 0
elif n == 2:
return 1
else:
A__ : Any = [0, 1]
for i in range(2 , n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> int:
"""simple docstring"""
A__ : Dict = 0
A__ : Optional[int] = 2
while digits < n:
index += 1
A__ : Dict = len(str(fibonacci(__UpperCamelCase ) ) )
return index
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int = 10_00 ) -> int:
"""simple docstring"""
return fibonacci_digits_index(__UpperCamelCase )
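# Worked example (using the original function names): the sequence runs
# 0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, ..., so the first term with
# 3 digits is the 12th, i.e. fibonacci_digits_index(3) == 12. For the default
# n = 1000 the answer is index 4782 (Project Euler 25).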
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 55
| 1
|
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self , UpperCamelCase__ = None ):
if components is None:
A__ : Optional[int] = []
A__ : Dict = list(UpperCamelCase__ )
def __len__( self ):
return len(self.__components )
def __str__( self ):
return "(" + ",".join(map(UpperCamelCase__ , self.__components ) ) + ")"
def __add__( self , UpperCamelCase__ ):
A__ : Optional[int] = len(self )
if size == len(UpperCamelCase__ ):
A__ : Optional[int] = [self.__components[i] + other.component(UpperCamelCase__ ) for i in range(UpperCamelCase__ )]
return Vector(UpperCamelCase__ )
else:
raise Exception('''must have the same size''' )
def __sub__( self , UpperCamelCase__ ):
A__ : Optional[Any] = len(self )
if size == len(UpperCamelCase__ ):
A__ : List[Any] = [self.__components[i] - other.component(UpperCamelCase__ ) for i in range(UpperCamelCase__ )]
return Vector(UpperCamelCase__ )
else: # error case
raise Exception('''must have the same size''' )
@overload
def __mul__( self , UpperCamelCase__ ):
...
@overload
def __mul__( self , UpperCamelCase__ ):
...
def __mul__( self , UpperCamelCase__ ):
if isinstance(UpperCamelCase__ , (float, int) ):
A__ : Optional[Any] = [c * other for c in self.__components]
return Vector(UpperCamelCase__ )
        elif isinstance(UpperCamelCase__ , Vector ) and len(self ) == len(UpperCamelCase__ ):
A__ : Optional[int] = len(self )
A__ : List[Any] = [self.__components[i] * other.component(UpperCamelCase__ ) for i in range(UpperCamelCase__ )]
return sum(UpperCamelCase__ )
else: # error case
raise Exception('''invalid operand!''' )
def __snake_case ( self ):
return Vector(self.__components )
def __snake_case ( self , UpperCamelCase__ ):
        if isinstance(UpperCamelCase__ , int ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception('''index out of range''' )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ ):
assert -len(self.__components ) <= pos < len(self.__components )
A__ : Tuple = value
def __snake_case ( self ):
if len(self.__components ) == 0:
raise Exception('''Vector is empty''' )
A__ : Tuple = [c**2 for c in self.__components]
return math.sqrt(sum(UpperCamelCase__ ) )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ = False ):
A__ : Dict = self * other
A__ : Any = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
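# Minimal usage sketch for the vector class above (class and method names as
# the annotations below suggest they were originally written: Vector,
# component, euclidean_length, angle):
#   v = Vector([1.0, 2.0, 3.0])
#   w = Vector([4.0, 5.0, 6.0])
#   str(v + w)  # "(5.0,7.0,9.0)"
#   v * w       # dot product -> 32.0
#   v * 2.0     # scaling -> Vector([2.0, 4.0, 6.0])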
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Vector:
"""simple docstring"""
    assert isinstance(__UpperCamelCase , int )
return Vector([0] * dimension )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int , __UpperCamelCase : int ) -> Vector:
"""simple docstring"""
    assert isinstance(__UpperCamelCase , int ) and (isinstance(__UpperCamelCase , int ))
A__ : Union[str, Any] = [0] * dimension
A__ : str = 1
return Vector(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : float , __UpperCamelCase : Vector , __UpperCamelCase : Vector ) -> Vector:
"""simple docstring"""
assert (
        isinstance(__UpperCamelCase , Vector )
        and isinstance(__UpperCamelCase , Vector )
and (isinstance(__UpperCamelCase , (int, float) ))
)
return x * scalar + y
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : int ) -> Vector:
"""simple docstring"""
random.seed(__UpperCamelCase )
A__ : Optional[Any] = [random.randint(__UpperCamelCase , __UpperCamelCase ) for _ in range(__UpperCamelCase )]
return Vector(__UpperCamelCase )
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : Optional[Any] = matrix
A__ : Optional[int] = w
A__ : Any = h
def __str__( self ):
A__ : Optional[int] = ''''''
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self , UpperCamelCase__ ):
if self.__width == other.width() and self.__height == other.height():
A__ : Dict = []
for i in range(self.__height ):
A__ : Any = [
self.__matrix[i][j] + other.component(UpperCamelCase__ , UpperCamelCase__ )
for j in range(self.__width )
]
matrix.append(UpperCamelCase__ )
return Matrix(UpperCamelCase__ , self.__width , self.__height )
else:
            raise Exception('''matrices must have the same dimension!''' )
def __sub__( self , UpperCamelCase__ ):
if self.__width == other.width() and self.__height == other.height():
A__ : List[str] = []
for i in range(self.__height ):
A__ : int = [
self.__matrix[i][j] - other.component(UpperCamelCase__ , UpperCamelCase__ )
for j in range(self.__width )
]
matrix.append(UpperCamelCase__ )
return Matrix(UpperCamelCase__ , self.__width , self.__height )
else:
raise Exception('''matrices must have the same dimension!''' )
@overload
def __mul__( self , UpperCamelCase__ ):
...
@overload
def __mul__( self , UpperCamelCase__ ):
...
def __mul__( self , UpperCamelCase__ ):
        if isinstance(UpperCamelCase__ , Vector ): # matrix-vector
if len(UpperCamelCase__ ) == self.__width:
A__ : Optional[Any] = zero_vector(self.__height )
for i in range(self.__height ):
A__ : Optional[Any] = [
self.__matrix[i][j] * other.component(UpperCamelCase__ )
for j in range(self.__width )
]
ans.change_component(UpperCamelCase__ , sum(UpperCamelCase__ ) )
return ans
else:
raise Exception(
'''vector must have the same size as the '''
'''number of columns of the matrix!''' )
elif isinstance(UpperCamelCase__ , (int, float) ): # matrix-scalar
A__ : List[str] = [
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(UpperCamelCase__ , self.__width , self.__height )
return None
def __snake_case ( self ):
return self.__height
def __snake_case ( self ):
return self.__width
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ ):
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
            raise Exception('''component: indices out of bounds''' )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
if 0 <= x < self.__height and 0 <= y < self.__width:
A__ : Any = value
else:
raise Exception('''change_component: indices out of bounds''' )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ ):
if self.__height != self.__width:
raise Exception('''Matrix is not square''' )
A__ : int = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(UpperCamelCase__ ) ):
A__ : Any = minor[i][:y] + minor[i][y + 1 :]
return Matrix(UpperCamelCase__ , self.__width - 1 , self.__height - 1 ).determinant()
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ ):
if self.__height != self.__width:
raise Exception('''Matrix is not square''' )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(UpperCamelCase__ , UpperCamelCase__ )
else:
raise Exception('''Indices out of bounds''' )
def __snake_case ( self ):
if self.__height != self.__width:
raise Exception('''Matrix is not square''' )
if self.__height < 1:
raise Exception('''Matrix has no element''' )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
A__ : Dict = [
self.__matrix[0][y] * self.cofactor(0 , UpperCamelCase__ ) for y in range(self.__width )
]
return sum(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Matrix:
"""simple docstring"""
A__ : list[list[float]] = [[0] * n for _ in range(__UpperCamelCase )]
return Matrix(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : int ) -> Matrix:
"""simple docstring"""
random.seed(__UpperCamelCase )
A__ : list[list[float]] = [
[random.randint(__UpperCamelCase , __UpperCamelCase ) for _ in range(__UpperCamelCase )] for _ in range(__UpperCamelCase )
]
return Matrix(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
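# Minimal usage sketch for the matrix class above (names as in the original
# source: Matrix, determinant, minor, cofactor):
#   m = Matrix([[1, 2], [3, 4]], 2, 2)   # entries, width, height
#   m.determinant()                      # 1*4 - 2*3 == -2
#   m.minor(0, 0)                        # determinant of [[4]] == 4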
| 55
|
_SCREAMING_SNAKE_CASE : List[str] = range(2, 2_0 + 1)
_SCREAMING_SNAKE_CASE : Optional[Any] = [1_0**k for k in range(ks[-1] + 1)]
_SCREAMING_SNAKE_CASE : dict[int, dict[int, list[list[int]]]] = {}
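# Project Euler 551: the sequence starts at 1 and each term adds the digit
# sum of the previous term; the goal is the 10**15-th term. Digits are stored
# little-endian in a list and split as b * 10**k + c. For a fixed digit sum
# of the high part b and a fixed low part c, the total increase and the
# number of terms consumed are always the same, so the memo dict above caches
# these "jumps" and long stretches of the sequence are replayed instead of
# being recomputed term by term.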
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any , __UpperCamelCase : Dict , __UpperCamelCase : Any , __UpperCamelCase : Optional[Any] ) -> int:
"""simple docstring"""
A__ : Tuple = sum(a_i[j] for j in range(__UpperCamelCase , len(__UpperCamelCase ) ) )
A__ : Tuple = sum(a_i[j] * base[j] for j in range(min(len(__UpperCamelCase ) , __UpperCamelCase ) ) )
A__ , A__ : Optional[int] = 0, 0
A__ : List[Any] = n - i
A__ : Any = memo.get(__UpperCamelCase )
if sub_memo is not None:
A__ : Optional[int] = sub_memo.get(__UpperCamelCase )
if jumps is not None and len(__UpperCamelCase ) > 0:
# find and make the largest jump without going over
A__ : List[Any] = -1
for _k in range(len(__UpperCamelCase ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
A__ : List[str] = _k
break
if max_jump >= 0:
A__ , A__ , A__ : List[Any] = jumps[max_jump]
# since the difference between jumps is cached, add c
A__ : int = diff + c
for j in range(min(__UpperCamelCase , len(__UpperCamelCase ) ) ):
A__ , A__ : List[str] = divmod(__UpperCamelCase , 10 )
if new_c > 0:
add(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
else:
A__ : List[Any] = []
else:
A__ : Optional[Any] = {c: []}
A__ : int = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
A__ , A__ : str = next_term(__UpperCamelCase , k - 1 , i + dn , __UpperCamelCase )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
A__ , A__ : str = compute(__UpperCamelCase , __UpperCamelCase , i + dn , __UpperCamelCase )
diff += _diff
dn += terms_jumped
A__ : str = sub_memo[c]
# keep jumps sorted by # of terms skipped
A__ : List[Any] = 0
while j < len(__UpperCamelCase ):
if jumps[j][1] > dn:
break
j += 1
    # cache the jump for this value of digitsum(b) and c
sub_memo[c].insert(__UpperCamelCase , (diff, dn, k) )
return (diff, dn)
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[str] , __UpperCamelCase : int ) -> Any:
"""simple docstring"""
if i >= n:
return 0, i
if k > len(__UpperCamelCase ):
a_i.extend([0 for _ in range(k - len(__UpperCamelCase ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
A__ : Optional[Any] = i
A__ , A__ , A__ : Dict = 0, 0, 0
for j in range(len(__UpperCamelCase ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
A__ : int = ds_c + ds_b
diff += addend
A__ : List[Any] = 0
for j in range(__UpperCamelCase ):
A__ : Optional[Any] = a_i[j] + addend
A__ , A__ : List[str] = divmod(__UpperCamelCase , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return diff, i - start_i
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Dict , __UpperCamelCase : List[Any] , __UpperCamelCase : int ) -> Tuple:
"""simple docstring"""
for j in range(__UpperCamelCase , len(__UpperCamelCase ) ):
A__ : Any = digits[j] + addend
if s >= 10:
A__ , A__ : Union[str, Any] = divmod(__UpperCamelCase , 10 )
A__ : Optional[int] = addend // 10 + quotient
else:
A__ : Any = s
A__ : Dict = addend // 10
if addend == 0:
break
while addend > 0:
A__ , A__ : Dict = divmod(__UpperCamelCase , 10 )
digits.append(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int = 10**15 ) -> int:
"""simple docstring"""
A__ : List[Any] = [1]
A__ : Dict = 1
A__ : Tuple = 0
while True:
A__ , A__ : List[str] = next_term(__UpperCamelCase , 20 , i + dn , __UpperCamelCase )
dn += terms_jumped
if dn == n - i:
break
A__ : List[str] = 0
for j in range(len(__UpperCamelCase ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(f"""{solution() = }""")
| 55
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_SCREAMING_SNAKE_CASE : Optional[Any] = {'configuration_sew': ['SEW_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SEWConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : int = [
'SEW_PRETRAINED_MODEL_ARCHIVE_LIST',
'SEWForCTC',
'SEWForSequenceClassification',
'SEWModel',
'SEWPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
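# Standard Hugging Face lazy-import layout: the try/except blocks prune the
# torch-only symbols when torch is unavailable, the TYPE_CHECKING branch gives
# static analyzers real imports, and at runtime the module is swapped for a
# _LazyModule (the final assignment was `sys.modules[__name__] = ...` before
# the identifier renaming above).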
| 55
|
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] , __UpperCamelCase : int=False ) -> Tuple:
"""simple docstring"""
try:
A__ : Dict = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
A__ : Tuple = default
else:
# KEY is set, convert it to True or False.
try:
A__ : Union[str, Any] = strtobool(__UpperCamelCase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F"If set, {key} must be yes or no." )
return _value
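# Example: exporting RUN_SLOW=yes makes the flag below parse to True, so
# @slow-decorated tests run; when the variable is unset it falls back to the
# supplied default (False).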
_SCREAMING_SNAKE_CASE : Union[str, Any] = parse_flag_from_env('RUN_SLOW', default=False)
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] ) -> Any:
"""simple docstring"""
return unittest.skip('''Test was skipped''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Tuple ) -> Union[str, Any]:
"""simple docstring"""
return unittest.skipUnless(_run_slow_tests , '''test is slow''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : str ) -> int:
"""simple docstring"""
return unittest.skipUnless(not torch.cuda.is_available() , '''test requires only a CPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] ) -> Tuple:
"""simple docstring"""
return unittest.skipUnless(torch.cuda.is_available() , '''test requires a GPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Dict ) -> List[str]:
"""simple docstring"""
    return unittest.skipUnless(is_xpu_available() , '''test requires an XPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Dict ) -> Any:
"""simple docstring"""
    return unittest.skipUnless(is_mps_available() , '''test requires `mps` backend support in `torch`''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Optional[Any]:
"""simple docstring"""
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , '''test requires the Hugging Face suite''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Tuple ) -> Tuple:
"""simple docstring"""
return unittest.skipUnless(is_bnb_available() , '''test requires the bitsandbytes library''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] ) -> List[Any]:
"""simple docstring"""
return unittest.skipUnless(is_tpu_available() , '''test requires TPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Tuple:
"""simple docstring"""
return unittest.skipUnless(torch.cuda.device_count() == 1 , '''test requires a GPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Dict:
"""simple docstring"""
    return unittest.skipUnless(torch.xpu.device_count() == 1 , '''test requires an XPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any ) -> str:
"""simple docstring"""
return unittest.skipUnless(torch.cuda.device_count() > 1 , '''test requires multiple GPUs''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Any:
"""simple docstring"""
return unittest.skipUnless(torch.xpu.device_count() > 1 , '''test requires multiple XPUs''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] ) -> int:
"""simple docstring"""
return unittest.skipUnless(is_safetensors_available() , '''test requires safetensors''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] ) -> Optional[Any]:
"""simple docstring"""
return unittest.skipUnless(is_deepspeed_available() , '''test requires DeepSpeed''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any ) -> List[Any]:
"""simple docstring"""
return unittest.skipUnless(is_torch_version('''>=''' , '''1.12.0''' ) , '''test requires torch version >= 1.12.0''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[int]=None , __UpperCamelCase : List[Any]=None ) -> Optional[Any]:
"""simple docstring"""
if test_case is None:
return partial(__UpperCamelCase , version=__UpperCamelCase )
return unittest.skipUnless(is_torch_version('''>=''' , __UpperCamelCase ) , F"test requires torch version >= {version}" )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
return unittest.skipUnless(is_tensorboard_available() , '''test requires Tensorboard''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any ) -> Tuple:
"""simple docstring"""
return unittest.skipUnless(is_wandb_available() , '''test requires wandb''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Tuple ) -> Any:
"""simple docstring"""
return unittest.skipUnless(is_comet_ml_available() , '''test requires comet_ml''' )(__UpperCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
return unittest.skipUnless(
_atleast_one_tracker_available , '''test requires at least one tracker to be available and for `comet_ml` to not be installed''' , )(__UpperCamelCase )
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = True
@classmethod
def __snake_case ( cls ):
A__ : Tuple = tempfile.mkdtemp()
@classmethod
def __snake_case ( cls ):
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def __snake_case ( self ):
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob('''**/*''' ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(UpperCamelCase__ )
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self ):
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self , UpperCamelCase__ ):
A__ : Tuple = mocks if isinstance(UpperCamelCase__ , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : str ) -> Any:
"""simple docstring"""
A__ : int = AcceleratorState()
A__ : Any = tensor[None].clone().to(state.device )
A__ : Optional[int] = gather(__UpperCamelCase ).cpu()
A__ : Any = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , __UpperCamelCase ):
return False
return True
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : List[Any] = returncode
A__ : Union[str, Any] = stdout
A__ : Dict = stderr
async def SCREAMING_SNAKE_CASE ( __UpperCamelCase : str , __UpperCamelCase : Optional[Any] ) -> Any:
"""simple docstring"""
while True:
A__ : Tuple = await stream.readline()
if line:
callback(__UpperCamelCase )
else:
break
async def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[Any]=None , __UpperCamelCase : List[Any]=None , __UpperCamelCase : Tuple=None , __UpperCamelCase : Tuple=False , __UpperCamelCase : List[Any]=False ) -> _RunOutput:
"""simple docstring"""
if echo:
print('''\nRunning: ''' , ''' '''.join(__UpperCamelCase ) )
A__ : int = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=__UpperCamelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=__UpperCamelCase , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
    # If it starts hanging, we will need to switch to the following code. The problem is that
    # no data will be seen until it's done and, if it hangs, there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
A__ : List[Any] = []
A__ : str = []
def tee(__UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Dict , __UpperCamelCase : List[Any]="" ):
A__ : Optional[Any] = line.decode('''utf-8''' ).rstrip()
sink.append(__UpperCamelCase )
if not quiet:
print(__UpperCamelCase , __UpperCamelCase , file=__UpperCamelCase )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda __UpperCamelCase : tee(__UpperCamelCase , __UpperCamelCase , sys.stdout , label='''stdout:''' ) ) ),
asyncio.create_task(_read_stream(p.stderr , lambda __UpperCamelCase : tee(__UpperCamelCase , __UpperCamelCase , sys.stderr , label='''stderr:''' ) ) ),
] , timeout=__UpperCamelCase , )
return _RunOutput(await p.wait() , __UpperCamelCase , __UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] , __UpperCamelCase : Any=None , __UpperCamelCase : List[Any]=None , __UpperCamelCase : List[str]=1_80 , __UpperCamelCase : List[str]=False , __UpperCamelCase : Dict=True ) -> _RunOutput:
"""simple docstring"""
A__ : Dict = asyncio.get_event_loop()
A__ : Optional[Any] = loop.run_until_complete(
_stream_subprocess(__UpperCamelCase , env=__UpperCamelCase , stdin=__UpperCamelCase , timeout=__UpperCamelCase , quiet=__UpperCamelCase , echo=__UpperCamelCase ) )
A__ : Union[str, Any] = ''' '''.join(__UpperCamelCase )
if result.returncode > 0:
A__ : Optional[Any] = '''\n'''.join(result.stderr )
raise RuntimeError(
F"'{cmd_str}' failed with returncode {result.returncode}\n\n"
F"The combined stderr from workers follows:\n{stderr}" )
return result
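# Hypothetical usage sketch (the wrapper above was presumably named
# `execute_subprocess_async` in the original source; stdout/stderr hold the
# decoded output lines):
#   result = execute_subprocess_async(["python", "-c", "print('ok')"])
#   assert result.returncode == 0 and result.stdout == ["ok"]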
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[str] , __UpperCamelCase : List[Any]=False ) -> Dict:
"""simple docstring"""
try:
A__ : List[Any] = subprocess.check_output(__UpperCamelCase , stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(__UpperCamelCase , '''decode''' ):
A__ : Any = output.decode('''utf-8''' )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
F"Command `{' '.join(__UpperCamelCase )}` failed with the following error:\n\n{e.output.decode()}" ) from e
| 55
| 1
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self ):
A__ : Optional[int] = tempfile.mkdtemp()
A__ : Optional[int] = BlipImageProcessor()
A__ : Union[str, Any] = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''' )
A__ : List[str] = BertTokenizerFast.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
A__ : Union[str, Any] = InstructBlipProcessor(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
processor.save_pretrained(self.tmpdirname )
def __snake_case ( self , **UpperCamelCase__ ):
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase__ ).tokenizer
def __snake_case ( self , **UpperCamelCase__ ):
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase__ ).image_processor
def __snake_case ( self , **UpperCamelCase__ ):
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase__ ).qformer_tokenizer
def __snake_case ( self ):
shutil.rmtree(self.tmpdirname )
def __snake_case ( self ):
A__ : Optional[int] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
A__ : Any = [Image.fromarray(np.moveaxis(UpperCamelCase__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
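    # The helper above returns a single random RGB PIL image (a (3, 30, 400)
    # uint8 array moved to channel-last); the tests below feed it through both
    # the bare image processor and the full processor and expect matching
    # features.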
def __snake_case ( self ):
A__ : List[str] = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
A__ : Dict = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
A__ : Dict = self.get_image_processor(do_normalize=UpperCamelCase__ , padding_value=1.0 )
A__ : Tuple = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=UpperCamelCase__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCamelCase__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase__ )
self.assertIsInstance(processor.qformer_tokenizer , UpperCamelCase__ )
def __snake_case ( self ):
A__ : Dict = self.get_image_processor()
A__ : Any = self.get_tokenizer()
A__ : Optional[int] = self.get_qformer_tokenizer()
A__ : List[str] = InstructBlipProcessor(
tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ , qformer_tokenizer=UpperCamelCase__ )
A__ : List[Any] = self.prepare_image_inputs()
A__ : List[Any] = image_processor(UpperCamelCase__ , return_tensors='''np''' )
A__ : List[str] = processor(images=UpperCamelCase__ , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __snake_case ( self ):
A__ : Optional[int] = self.get_image_processor()
A__ : str = self.get_tokenizer()
A__ : List[Any] = self.get_qformer_tokenizer()
A__ : Union[str, Any] = InstructBlipProcessor(
tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ , qformer_tokenizer=UpperCamelCase__ )
A__ : List[Any] = '''lower newer'''
A__ : Union[str, Any] = processor(text=UpperCamelCase__ )
A__ : List[Any] = tokenizer(UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ )
A__ : Dict = qformer_tokenizer(UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['''qformer_''' + key] )
def __snake_case ( self ):
A__ : List[str] = self.get_image_processor()
A__ : Optional[Any] = self.get_tokenizer()
A__ : List[Any] = self.get_qformer_tokenizer()
A__ : int = InstructBlipProcessor(
tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ , qformer_tokenizer=UpperCamelCase__ )
A__ : Optional[int] = '''lower newer'''
A__ : Optional[int] = self.prepare_image_inputs()
A__ : Any = processor(text=UpperCamelCase__ , images=UpperCamelCase__ )
self.assertListEqual(
list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase__ ):
processor()
def __snake_case ( self ):
A__ : List[str] = self.get_image_processor()
A__ : Tuple = self.get_tokenizer()
A__ : str = self.get_qformer_tokenizer()
A__ : Any = InstructBlipProcessor(
tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ , qformer_tokenizer=UpperCamelCase__ )
A__ : Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A__ : Optional[int] = processor.batch_decode(UpperCamelCase__ )
A__ : Union[str, Any] = tokenizer.batch_decode(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
A__ : Optional[int] = self.get_image_processor()
A__ : Optional[int] = self.get_tokenizer()
A__ : str = self.get_qformer_tokenizer()
A__ : Optional[int] = InstructBlipProcessor(
tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ , qformer_tokenizer=UpperCamelCase__ )
A__ : Optional[Any] = '''lower newer'''
A__ : Any = self.prepare_image_inputs()
A__ : Dict = processor(text=UpperCamelCase__ , images=UpperCamelCase__ )
self.assertListEqual(
list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
| 55
|
import numpy as np
_SCREAMING_SNAKE_CASE : Any = [
['a', 'b', 'c', 'd', 'e'],
['f', 'g', 'h', 'i', 'k'],
['l', 'm', 'n', 'o', 'p'],
['q', 'r', 's', 't', 'u'],
['v', 'w', 'x', 'y', 'z'],
]
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self ):
A__ : List[Any] = np.array(UpperCamelCase__ )
def __snake_case ( self , UpperCamelCase__ ):
A__ , A__ : Any = np.where(letter == self.SQUARE )
A__ : int = np.concatenate([indexa + 1, indexa + 1] )
return indexes
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ ):
A__ : Union[str, Any] = self.SQUARE[indexa - 1, indexa - 1]
return letter
def __snake_case ( self , UpperCamelCase__ ):
A__ : List[str] = message.lower()
A__ : str = message.replace(''' ''' , '''''' )
A__ : Union[str, Any] = message.replace('''j''' , '''i''' )
A__ : List[Any] = np.empty((2, len(UpperCamelCase__ )) )
for letter_index in range(len(UpperCamelCase__ ) ):
A__ : Any = self.letter_to_numbers(message[letter_index] )
A__ : Optional[Any] = numbers[0]
A__ : List[str] = numbers[1]
A__ : List[str] = first_step.reshape(2 * len(UpperCamelCase__ ) )
A__ : List[Any] = ''''''
for numbers_index in range(len(UpperCamelCase__ ) ):
A__ : Dict = int(second_step[numbers_index * 2] )
A__ : List[str] = int(second_step[(numbers_index * 2) + 1] )
A__ : Dict = self.numbers_to_letter(UpperCamelCase__ , UpperCamelCase__ )
A__ : Tuple = encoded_message + letter
return encoded_message
def __snake_case ( self , UpperCamelCase__ ):
A__ : str = message.lower()
        A__ : str = message.replace(''' ''' , '''''' )
A__ : List[Any] = np.empty(2 * len(UpperCamelCase__ ) )
for letter_index in range(len(UpperCamelCase__ ) ):
A__ : List[str] = self.letter_to_numbers(message[letter_index] )
A__ : Dict = numbers[0]
A__ : int = numbers[1]
A__ : Optional[Any] = first_step.reshape((2, len(UpperCamelCase__ )) )
A__ : int = ''''''
for numbers_index in range(len(UpperCamelCase__ ) ):
A__ : Tuple = int(second_step[0, numbers_index] )
A__ : Dict = int(second_step[1, numbers_index] )
A__ : List[str] = self.numbers_to_letter(UpperCamelCase__ , UpperCamelCase__ )
A__ : Tuple = decoded_message + letter
return decoded_message
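# Round-trip sketch for the Polybius-square (Bifid) cipher above (class and
# method names assumed from the original source, before the identifier
# renaming):
#   bifid = BifidCipher()
#   bifid.decode(bifid.encode("testmessage"))  # -> "testmessage"
# encode() lowercases, strips spaces and folds "j" into "i", so only messages
# already in that normal form round-trip exactly.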
| 55
| 1
|
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_, unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = RoCBertTokenizer
_lowerCAmelCase = None
_lowerCAmelCase = False
_lowerCAmelCase = True
_lowerCAmelCase = filter_non_english
def __snake_case ( self ):
super().setUp()
A__ : Optional[Any] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''你''', '''好''', '''是''', '''谁''', '''a''', '''b''', '''c''', '''d''']
A__ : int = {}
A__ : Dict = {}
for i, value in enumerate(UpperCamelCase__ ):
A__ : int = i
A__ : Any = i
A__ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
A__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_shape_file'''] )
A__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_pronunciation_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
with open(self.word_shape_file , '''w''' , encoding='''utf-8''' ) as word_shape_writer:
json.dump(UpperCamelCase__ , UpperCamelCase__ , ensure_ascii=UpperCamelCase__ )
with open(self.word_pronunciation_file , '''w''' , encoding='''utf-8''' ) as word_pronunciation_writer:
json.dump(UpperCamelCase__ , UpperCamelCase__ , ensure_ascii=UpperCamelCase__ )
def __snake_case ( self ):
A__ : Optional[Any] = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
A__ : Tuple = tokenizer.tokenize('''你好[SEP]你是谁''' )
self.assertListEqual(UpperCamelCase__ , ['''你''', '''好''', '''[SEP]''', '''你''', '''是''', '''谁'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(UpperCamelCase__ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(UpperCamelCase__ ) , [5, 6, 2, 5, 7, 8] )
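        # RoCBert pairs every token with three parallel id channels: the vocab id
        # plus a glyph ("shape") id and a phonetic ("pronunciation") id. The toy
        # vocab built in setUp maps all three identically, hence the identical
        # expected lists above.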
def __snake_case ( self ):
A__ : Dict = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def __snake_case ( self ):
A__ : List[str] = RoCBertBasicTokenizer(do_lower_case=UpperCamelCase__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def __snake_case ( self ):
A__ : str = RoCBertBasicTokenizer(do_lower_case=UpperCamelCase__ , strip_accents=UpperCamelCase__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def __snake_case ( self ):
A__ : str = RoCBertBasicTokenizer(do_lower_case=UpperCamelCase__ , strip_accents=UpperCamelCase__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def __snake_case ( self ):
A__ : int = RoCBertBasicTokenizer(do_lower_case=UpperCamelCase__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def __snake_case ( self ):
A__ : List[Any] = RoCBertBasicTokenizer(do_lower_case=UpperCamelCase__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __snake_case ( self ):
A__ : Any = RoCBertBasicTokenizer(do_lower_case=UpperCamelCase__ , strip_accents=UpperCamelCase__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __snake_case ( self ):
A__ : Optional[Any] = RoCBertBasicTokenizer(do_lower_case=UpperCamelCase__ , strip_accents=UpperCamelCase__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __snake_case ( self ):
A__ : Dict = RoCBertBasicTokenizer(do_lower_case=UpperCamelCase__ , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def __snake_case ( self ):
A__ : str = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
A__ : Union[str, Any] = {}
for i, token in enumerate(UpperCamelCase__ ):
A__ : List[str] = i
A__ : Dict = RoCBertWordpieceTokenizer(vocab=UpperCamelCase__ , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
def __snake_case ( self ):
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def __snake_case ( self ):
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def __snake_case ( self ):
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def __snake_case ( self ):
A__ : Tuple = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(UpperCamelCase__ ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
if self.test_rust_tokenizer:
A__ : Optional[int] = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(UpperCamelCase__ ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
def __snake_case ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
A__ : Optional[int] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__ )
A__ : Optional[int] = F"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
A__ : Dict = tokenizer_r.encode_plus(
UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , )
A__ : Tuple = tokenizer_r.do_lower_case if hasattr(UpperCamelCase__ , '''do_lower_case''' ) else False
A__ : Optional[Any] = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def __snake_case ( self ):
A__ : int = ['''的''', '''人''', '''有''']
A__ : Tuple = ''''''.join(UpperCamelCase__ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
A__ : List[str] = True
A__ : List[str] = self.tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__ )
A__ : Any = self.rust_tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__ )
A__ : List[str] = tokenizer_p.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
A__ : str = tokenizer_r.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
A__ : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(UpperCamelCase__ )
A__ : int = tokenizer_p.convert_ids_to_tokens(UpperCamelCase__ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
A__ : Optional[int] = False
A__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__ )
A__ : Tuple = self.tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__ )
A__ : Any = tokenizer_r.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
A__ : Dict = tokenizer_p.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
A__ : Dict = tokenizer_r.convert_ids_to_tokens(UpperCamelCase__ )
A__ : str = tokenizer_p.convert_ids_to_tokens(UpperCamelCase__ )
# it is expected that only the first Chinese character is not preceded by "##".
A__ : Tuple = [
F"##{token}" if idx != 0 else token for idx, token in enumerate(UpperCamelCase__ )
]
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
@slow
def __snake_case ( self ):
A__ : int = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
A__ : str = tokenizer.encode('''你好''' , add_special_tokens=UpperCamelCase__ )
A__ : List[str] = tokenizer.encode('''你是谁''' , add_special_tokens=UpperCamelCase__ )
A__ : Dict = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ )
A__ : Optional[int] = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ , UpperCamelCase__ )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def __snake_case ( self ):
A__ : Tuple = self.get_tokenizers(do_lower_case=UpperCamelCase__ )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
A__ : Union[str, Any] = '''你好,你是谁'''
A__ : Optional[int] = tokenizer.tokenize(UpperCamelCase__ )
A__ : Optional[Any] = tokenizer.convert_tokens_to_ids(UpperCamelCase__ )
A__ : int = tokenizer.convert_tokens_to_shape_ids(UpperCamelCase__ )
A__ : Any = tokenizer.convert_tokens_to_pronunciation_ids(UpperCamelCase__ )
A__ : Dict = tokenizer.prepare_for_model(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
A__ : Any = tokenizer.encode_plus(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
| 55
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 55
| 1
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
_SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : List[Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
_SCREAMING_SNAKE_CASE : Dict = [
'small',
'small-base',
'medium',
'medium-base',
'intermediate',
'intermediate-base',
'large',
'large-base',
'xlarge',
'xlarge-base',
]
_SCREAMING_SNAKE_CASE : Optional[int] = {
'vocab_file': {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt',
'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt',
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt',
'funnel-transformer/medium-base': (
'https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt'
),
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt',
'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt',
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt',
'funnel-transformer/xlarge-base': (
'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json',
'funnel-transformer/small-base': (
'https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json'
),
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json',
'funnel-transformer/medium-base': (
'https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json'
),
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json',
'funnel-transformer/large-base': (
'https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json'
),
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json',
'funnel-transformer/xlarge-base': (
'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json'
),
},
}
_SCREAMING_SNAKE_CASE : int = {f"""funnel-transformer/{name}""": 5_1_2 for name in _model_names}
_SCREAMING_SNAKE_CASE : Dict = {f"""funnel-transformer/{name}""": {'do_lower_case': True} for name in _model_names}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = VOCAB_FILES_NAMES
_lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase = PRETRAINED_INIT_CONFIGURATION
_lowerCAmelCase = FunnelTokenizer
_lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase = 2
def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=True , UpperCamelCase__="<unk>" , UpperCamelCase__="<sep>" , UpperCamelCase__="<pad>" , UpperCamelCase__="<cls>" , UpperCamelCase__="<mask>" , UpperCamelCase__="<s>" , UpperCamelCase__="</s>" , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=None , UpperCamelCase__="##" , **UpperCamelCase__ , ):
super().__init__(
UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , do_lower_case=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , clean_text=UpperCamelCase__ , tokenize_chinese_chars=UpperCamelCase__ , strip_accents=UpperCamelCase__ , wordpieces_prefix=UpperCamelCase__ , **UpperCamelCase__ , )
A__ : Dict = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , UpperCamelCase__ ) != do_lower_case
or normalizer_state.get('''strip_accents''' , UpperCamelCase__ ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , UpperCamelCase__ ) != tokenize_chinese_chars
):
A__ : Tuple = getattr(UpperCamelCase__ , normalizer_state.pop('''type''' ) )
A__ : int = do_lower_case
A__ : Tuple = strip_accents
A__ : Any = tokenize_chinese_chars
A__ : Dict = normalizer_class(**UpperCamelCase__ )
A__ : Union[str, Any] = do_lower_case
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__=None ):
A__ : List[str] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ = None ):
A__ : int = [self.sep_token_id]
A__ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0]
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
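    # Example: for a sequence pair this returns 2 for [CLS] (Funnel uses a
    # dedicated token type id, `cls_token_type_id = 2`, for the classification
    # token), then 0 over the first sequence and its [SEP] and 1 over the
    # second sequence and its [SEP].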
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ = None ):
A__ : Tuple = self._tokenizer.model.save(UpperCamelCase__ , name=UpperCamelCase__ )
return tuple(UpperCamelCase__ )
| 55
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUs (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
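# A typical invocation (sketch; the filename below is a placeholder for this
# script):
#   accelerate launch this_script.py --mixed_precision fp16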
_SCREAMING_SNAKE_CASE : str = 1_6
_SCREAMING_SNAKE_CASE : Tuple = 3_2
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Accelerator , __UpperCamelCase : int = 16 ) -> Optional[int]:
"""simple docstring"""
A__ : List[str] = AutoTokenizer.from_pretrained('''bert-base-cased''' )
A__ : Optional[int] = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(__UpperCamelCase : Union[str, Any] ):
# max_length=None => use the model max length (it's actually the default)
A__ : int = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__UpperCamelCase , max_length=__UpperCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
A__ : Optional[int] = datasets.map(
__UpperCamelCase , batched=__UpperCamelCase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
    # We also rename the 'label' column to 'labels', which is the name expected
    # by the models of the transformers library
A__ : List[Any] = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(__UpperCamelCase : Any ):
# On TPU it's best to pad everything to the same length or training will be very slow.
A__ : Optional[Any] = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want to round to multiples of 8/16
if accelerator.mixed_precision == "fp8":
A__ : int = 16
elif accelerator.mixed_precision != "no":
A__ : Any = 8
else:
A__ : Union[str, Any] = None
return tokenizer.pad(
__UpperCamelCase , padding='''longest''' , max_length=__UpperCamelCase , pad_to_multiple_of=__UpperCamelCase , return_tensors='''pt''' , )
# Instantiate dataloaders.
A__ : Optional[int] = DataLoader(
tokenized_datasets['''train'''] , shuffle=__UpperCamelCase , collate_fn=__UpperCamelCase , batch_size=__UpperCamelCase )
A__ : Tuple = DataLoader(
tokenized_datasets['''validation'''] , shuffle=__UpperCamelCase , collate_fn=__UpperCamelCase , batch_size=__UpperCamelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_SCREAMING_SNAKE_CASE : Dict = mocked_dataloaders # noqa: F811
def training_function(config, args):
    """Runs the full training and evaluation loop under `accelerate`."""
    # For testing only
    if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
        config['num_epochs'] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    metric = evaluate.load('glue', 'mrpc')
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=True)
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps)
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather((predictions, batch['labels']))
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(predictions=predictions, references=references)
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
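# The helper below is not part of the original script: it is a minimal sketch of
# how the manual last-batch truncation above collapses into a single
# `Accelerator.gather_for_metrics` call. The name `_evaluate_with_gather_for_metrics`
# is hypothetical.
def _evaluate_with_gather_for_metrics(accelerator, model, eval_dataloader, metric):
    model.eval()
    for batch in eval_dataloader:
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # gather_for_metrics drops the duplicated samples the distributed
        # sampler adds to the last batch, so no manual bookkeeping is needed.
        predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']))
        metric.add_batch(predictions=predictions, references=references)
    return metric.compute()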
def main():
    """Parses CLI arguments and launches training."""
    parser = argparse.ArgumentParser(description='Simple example of training script.')
    parser.add_argument(
        '--mixed_precision', type=str, default=None, choices=['no', 'fp16', 'bf16', 'fp8'],
        help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.')
    parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.')
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config, args)
if __name__ == "__main__":
main()
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_, unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = ShapEPipeline
_lowerCAmelCase = ["prompt"]
_lowerCAmelCase = ["prompt"]
_lowerCAmelCase = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
_lowerCAmelCase = False
@property
def __snake_case ( self ):
return 32
@property
def __snake_case ( self ):
return 32
@property
def __snake_case ( self ):
return self.time_input_dim * 4
@property
def __snake_case ( self ):
return 8
@property
def __snake_case ( self ):
A__ : List[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def __snake_case ( self ):
torch.manual_seed(0 )
A__ : List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(UpperCamelCase__ )
@property
def __snake_case ( self ):
torch.manual_seed(0 )
A__ : Optional[int] = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
A__ : int = PriorTransformer(**UpperCamelCase__ )
return model
@property
def __snake_case ( self ):
torch.manual_seed(0 )
A__ : int = {
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
A__ : Any = ShapERenderer(**UpperCamelCase__ )
return model
def __snake_case ( self ):
A__ : Optional[int] = self.dummy_prior
A__ : Dict = self.dummy_text_encoder
A__ : Optional[int] = self.dummy_tokenizer
A__ : Optional[int] = self.dummy_renderer
A__ : List[str] = HeunDiscreteScheduler(
beta_schedule='''exp''' , num_train_timesteps=1024 , prediction_type='''sample''' , use_karras_sigmas=UpperCamelCase__ , clip_sample=UpperCamelCase__ , clip_sample_range=1.0 , )
A__ : Optional[Any] = {
'''prior''': prior,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__=0 ):
if str(UpperCamelCase__ ).startswith('''mps''' ):
A__ : Optional[int] = torch.manual_seed(UpperCamelCase__ )
else:
A__ : Union[str, Any] = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
A__ : List[Any] = {
'''prompt''': '''horse''',
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
def __snake_case ( self ):
A__ : Tuple = '''cpu'''
A__ : str = self.get_dummy_components()
A__ : Any = self.pipeline_class(**UpperCamelCase__ )
A__ : Any = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A__ : Optional[Any] = pipe(**self.get_dummy_inputs(UpperCamelCase__ ) )
A__ : Optional[Any] = output.images[0]
A__ : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
A__ : Optional[Any] = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __snake_case ( self ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __snake_case ( self ):
A__ : Dict = torch_device == '''cpu'''
A__ : Any = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=UpperCamelCase__ , relax_max_difference=UpperCamelCase__ , )
def __snake_case ( self ):
A__ : Dict = self.get_dummy_components()
A__ : Dict = self.pipeline_class(**UpperCamelCase__ )
A__ : Optional[int] = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A__ : Optional[int] = 1
A__ : List[Any] = 2
A__ : Any = self.get_dummy_inputs(UpperCamelCase__ )
for key in inputs.keys():
if key in self.batch_params:
A__ : List[str] = batch_size * [inputs[key]]
A__ : Optional[int] = pipe(**UpperCamelCase__ , num_images_per_prompt=UpperCamelCase__ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __snake_case ( self ):
A__ : str = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_np_out.npy''' )
A__ : Dict = ShapEPipeline.from_pretrained('''openai/shap-e''' )
A__ : Any = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A__ : Union[str, Any] = torch.Generator(device=UpperCamelCase__ ).manual_seed(0 )
A__ : str = pipe(
'''a shark''' , generator=UpperCamelCase__ , guidance_scale=1_5.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool(PipelineTool):
    default_checkpoint = 'microsoft/speecht5_tts'
    description = (
        'This is a tool that reads an English text out loud. It takes an input named `text` which should contain the '
        'text to read (in English) and returns a waveform object containing the sound.'
    )
    name = 'text_reader'
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan

    inputs = ['text']
    outputs = ['audio']
    def setup(self):
        if self.post_processor is None:
            self.post_processor = 'microsoft/speecht5_hifigan'
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors='pt', truncation=True)
        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError('Datasets needs to be installed if not passing speaker embeddings.')
            embeddings_dataset = load_dataset('Matthijs/cmu-arctic-xvectors', split='validation')
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]['xvector']).unsqueeze(0)
        return {'input_ids': inputs['input_ids'], 'speaker_embeddings': speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
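# Minimal usage sketch (not in the original file; assumes the model weights can
# be downloaded from the Hub):
#
#   tool = TextToSpeechTool()
#   tool.setup()
#   waveform = tool.decode(tool.forward(tool.encode('Hello, world!')))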
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_lxmert': ['LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LxmertConfig'],
    'tokenization_lxmert': ['LxmertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_lxmert_fast'] = ['LxmertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_lxmert'] = [
        'LxmertEncoder',
        'LxmertForPreTraining',
        'LxmertForQuestionAnswering',
        'LxmertModel',
        'LxmertPreTrainedModel',
        'LxmertVisualFeatureEncoder',
        'LxmertXLayer',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_lxmert'] = [
        'TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFLxmertForPreTraining',
        'TFLxmertMainLayer',
        'TFLxmertModel',
        'TFLxmertPreTrainedModel',
        'TFLxmertVisualFeatureEncoder',
    ]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
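# With `_LazyModule`, the torch/TF submodules registered in `_import_structure`
# above are imported only on first attribute access: importing `LxmertModel`
# from this package triggers the `modeling_lxmert` import at that moment rather
# than at package-import time, keeping startup cheap.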
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'tokenizer_file': {
        'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'gpt-neox-20b': 2048,
}
class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    """Fast GPT-NeoX tokenizer backed by HuggingFace `tokenizers`."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token='<|endoftext|>',
                 bos_token='<|endoftext|>', eos_token='<|endoftext|>', add_prefix_space=False, **kwargs):
        super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token,
                         bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs)
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type'))
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Concatenates conversation turns, each terminated by EOS, keeping the last `model_max_length` ids."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
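# Minimal usage sketch (not in the original file; assumes Hub access):
#
#   tokenizer = GPTNeoXTokenizerFast.from_pretrained('EleutherAI/gpt-neox-20b')
#   ids = tokenizer('Hello world')['input_ids']
#   text = tokenizer.decode(ids)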
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance
def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    """
    Bi-directional Dijkstra's algorithm.

    Returns:
        shortest_path_distance (int): length of the shortest path.

    Warnings:
        If the destination is not reachable, function returns -1.

    >>> bidirectional_dij("E", "F", graph_fwd, graph_bwd)
    3
    """
    shortest_path_distance = -1
    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()
    shortest_distance = np.inf
    queue_forward.put((0, source))
    queue_backward.put((0, destination))
    if source == destination:
        return 0
    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)
        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)
        shortest_distance = pass_and_relaxation(
            graph_forward, v_fwd, visited_forward, visited_backward,
            cst_fwd, cst_bwd, queue_forward, parent_forward, shortest_distance)
        shortest_distance = pass_and_relaxation(
            graph_backward, v_bwd, visited_backward, visited_forward,
            cst_bwd, cst_fwd, queue_backward, parent_backward, shortest_distance)
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break
    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
'B': [['C', 1]],
'C': [['D', 1]],
'D': [['F', 1]],
'E': [['B', 1], ['G', 2]],
'F': [],
'G': [['F', 1]],
}
graph_bwd = {
'B': [['E', 1]],
'C': [['B', 1]],
'D': [['C', 1]],
'F': [['D', 1], ['G', 1]],
'E': [[None, np.inf]],
'G': [['E', 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
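    # Quick demonstration on the example graphs above: the best E -> F route is
    # E -> G -> F with total weight 2 + 1 = 3 (the alternative
    # E -> B -> C -> D -> F costs 4).
    print(bidirectional_dij('E', 'F', graph_fwd, graph_bwd))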
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'SenseTime/deformable-detr': 'https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json',
    # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig):
    """Configuration class for Deformable DETR models."""

    model_type = 'deformable_detr'
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
    }
    def __init__(
        self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=300,
        max_position_embeddings=1024, encoder_layers=6, encoder_ffn_dim=1024, encoder_attention_heads=8,
        decoder_layers=6, decoder_ffn_dim=1024, decoder_attention_heads=8, encoder_layerdrop=0.0,
        is_encoder_decoder=True, activation_function='relu', d_model=256, dropout=0.1,
        attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0,
        return_intermediate=True, auxiliary_loss=False, position_embedding_type='sine',
        backbone='resnet50', use_pretrained_backbone=True, dilation=False, num_feature_levels=4,
        encoder_n_points=4, decoder_n_points=4, two_stage=False, two_stage_num_proposals=300,
        with_box_refine=False, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1,
        dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1,
        focal_alpha=0.25, disable_custom_kernels=False, **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
                backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage4'])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get('model_type')
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError('If two_stage is True, with_box_refine must be True.')
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output['backbone_config'] = self.backbone_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
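# Minimal sketch (not part of the original file): thanks to `attribute_map`,
# the generic names resolve onto the DETR-specific attributes with defaults:
#
#   config = DeformableDetrConfig()
#   config.hidden_size == config.d_model == 256                        # True
#   config.num_attention_heads == config.encoder_attention_heads == 8  # True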
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
_SCREAMING_SNAKE_CASE : Union[str, Any] = 1E-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__=16 , UpperCamelCase__=13 , UpperCamelCase__=7 , UpperCamelCase__=14 , UpperCamelCase__=10 , UpperCamelCase__=19 , UpperCamelCase__=5 , UpperCamelCase__=4 , UpperCamelCase__=True , UpperCamelCase__=16 , UpperCamelCase__=2 , UpperCamelCase__=4 , UpperCamelCase__=4 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=[1, 2, 3, 4, 5] , UpperCamelCase__=25 , UpperCamelCase__=5 , ):
A__ : Optional[int] = d_model
A__ : Dict = parent
A__ : List[Any] = batch_size
A__ : Dict = prediction_length
A__ : List[Any] = context_length
A__ : Union[str, Any] = cardinality
A__ : int = num_time_features
A__ : Optional[Any] = lags_sequence
A__ : Optional[Any] = embedding_dimension
A__ : Any = is_training
A__ : Dict = hidden_size
A__ : List[Any] = num_hidden_layers
A__ : List[str] = num_attention_heads
A__ : Optional[int] = intermediate_size
A__ : Optional[Any] = hidden_act
A__ : Optional[int] = hidden_dropout_prob
A__ : Any = attention_probs_dropout_prob
A__ : List[str] = context_length
A__ : Any = prediction_length + label_length
A__ : List[str] = label_length
A__ : str = moving_average
A__ : List[str] = autocorrelation_factor
def __snake_case ( self ):
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def __snake_case ( self , UpperCamelCase__ ):
A__ : int = config.context_length + max(config.lags_sequence )
A__ : Any = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
A__ : List[str] = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
A__ : int = floats_tensor([self.batch_size, _past_length] )
A__ : Optional[int] = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
A__ : Optional[Any] = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
A__ : Optional[int] = floats_tensor([self.batch_size, config.prediction_length] )
A__ : str = {
'''past_values''': past_values,
'''static_categorical_features''': static_categorical_features,
'''past_time_features''': past_time_features,
'''past_observed_mask''': past_observed_mask,
'''future_time_features''': future_time_features,
'''future_values''': future_values,
}
return inputs_dict
def __snake_case ( self ):
A__ : Tuple = self.get_config()
A__ : Tuple = self.prepare_autoformer_inputs_dict(UpperCamelCase__ )
return config, inputs_dict
def __snake_case ( self ):
A__ , A__ : Tuple = self.prepare_config_and_inputs()
return config, inputs_dict
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ ):
A__ : Optional[int] = AutoformerModel(config=UpperCamelCase__ ).to(UpperCamelCase__ ).eval()
A__ : List[str] = model(**UpperCamelCase__ )
A__ : Any = outputs.encoder_last_hidden_state
A__ : str = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Tuple = model.get_encoder()
encoder.save_pretrained(UpperCamelCase__ )
A__ : Union[str, Any] = AutoformerEncoder.from_pretrained(UpperCamelCase__ ).to(UpperCamelCase__ )
A__ , A__ , A__ , A__ , A__ : str = model.create_network_inputs(**UpperCamelCase__ )
A__ , A__ : Dict = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
A__ : Tuple = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
A__ : List[Any] = encoder(inputs_embeds=UpperCamelCase__ )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
A__ : Dict = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
A__ : Optional[Any] = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
A__ : Tuple = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
A__ : str = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : str = model.get_decoder()
decoder.save_pretrained(UpperCamelCase__ )
A__ : str = AutoformerDecoder.from_pretrained(UpperCamelCase__ ).to(UpperCamelCase__ )
A__ : Any = decoder(
trend=UpperCamelCase__ , inputs_embeds=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
_lowerCAmelCase = (AutoformerForPrediction,) if is_torch_available() else ()
_lowerCAmelCase = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
def __snake_case ( self ):
A__ : List[str] = AutoformerModelTester(self )
A__ : Dict = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ )
def __snake_case ( self ):
self.config_tester.run_common_tests()
def __snake_case ( self ):
A__ , A__ : Dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
A__ : int = model_class(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCamelCase__ )
A__ , A__ : int = model_class.from_pretrained(UpperCamelCase__ , output_loading_info=UpperCamelCase__ )
self.assertEqual(info['''missing_keys'''] , [] )
def __snake_case ( self ):
A__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*UpperCamelCase__ )
@unittest.skip(reason='''Model has no tokens embeddings''' )
def __snake_case ( self ):
pass
def __snake_case ( self ):
A__ : Optional[Any] = inspect.signature(getattr(UpperCamelCase__ , '''forward''' ) )
# The main input is the name of the argument after `self`
A__ : Tuple = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , UpperCamelCase__ )
def __snake_case ( self ):
A__ , A__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : str = model_class(UpperCamelCase__ )
A__ : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ : List[str] = [*signature.parameters.keys()]
A__ : int = [
'''past_values''',
'''past_time_features''',
'''past_observed_mask''',
'''static_categorical_features''',
'''static_real_features''',
'''future_values''',
'''future_time_features''',
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append('''future_observed_mask''' )
expected_arg_names.extend(
[
'''decoder_attention_mask''',
'''head_mask''',
'''decoder_head_mask''',
'''cross_attn_head_mask''',
'''encoder_outputs''',
'''past_key_values''',
'''output_hidden_states''',
'''output_attentions''',
'''use_cache''',
'''return_dict''',
] )
self.assertListEqual(arg_names[: len(UpperCamelCase__ )] , UpperCamelCase__ )
def __snake_case ( self ):
A__ , A__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
A__ : Dict = True
A__ : int = getattr(self.model_tester , '''seq_length''' , UpperCamelCase__ )
A__ : Union[str, Any] = getattr(self.model_tester , '''decoder_seq_length''' , UpperCamelCase__ )
A__ : Tuple = getattr(self.model_tester , '''encoder_seq_length''' , UpperCamelCase__ )
A__ : Dict = getattr(self.model_tester , '''d_model''' , UpperCamelCase__ )
A__ : Dict = getattr(self.model_tester , '''num_attention_heads''' , UpperCamelCase__ )
A__ : Union[str, Any] = d_model // num_attention_heads
for model_class in self.all_model_classes:
A__ : List[Any] = True
A__ : Optional[int] = False
A__ : Union[str, Any] = True
A__ : Any = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A__ : List[str] = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
A__ : List[Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
A__ : Dict = True
A__ : Tuple = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A__ : Tuple = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
A__ : Any = outputs.encoder_attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
A__ : Union[str, Any] = len(UpperCamelCase__ )
A__ : List[Any] = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
# decoder attentions
A__ : str = outputs.decoder_attentions
self.assertIsInstance(UpperCamelCase__ , (list, tuple) )
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
A__ : Optional[Any] = outputs.cross_attentions
self.assertIsInstance(UpperCamelCase__ , (list, tuple) )
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
A__ : Optional[int] = True
A__ : Optional[int] = True
A__ : Tuple = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A__ : Dict = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(out_len + 2 , len(UpperCamelCase__ ) )
A__ : Optional[int] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def __snake_case ( self ):
super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename='train-batch.pt'):
    """Downloads a cached batch from the Hub and loads it onto the current device."""
    file = hf_hub_download(repo_id='hf-internal-testing/tourism-monthly-batch', filename=filename, repo_type='dataset')
    batch = torch.load(file, map_location=torch_device)
    return batch
@require_torch
@slow
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self ):
A__ : Dict = AutoformerModel.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(UpperCamelCase__ )
A__ : int = prepare_batch()
with torch.no_grad():
A__ : Any = model(
past_values=batch['''past_values'''] , past_time_features=batch['''past_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , static_categorical_features=batch['''static_categorical_features'''] , future_values=batch['''future_values'''] , future_time_features=batch['''future_time_features'''] , )[0]
A__ : Any = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , UpperCamelCase__ )
A__ : Any = torch.tensor(
[[0.3_5_9_3, -1.3_3_9_8, 0.6_3_3_0], [0.2_2_7_9, 1.5_3_9_6, -0.1_7_9_2], [0.0_4_5_0, 1.3_2_2_5, -0.2_3_3_5]] , device=UpperCamelCase__ )
self.assertTrue(torch.allclose(output[0, :3, :3] , UpperCamelCase__ , atol=UpperCamelCase__ ) )
def __snake_case ( self ):
A__ : Tuple = AutoformerForPrediction.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(UpperCamelCase__ )
A__ : Optional[Any] = prepare_batch('''val-batch.pt''' )
with torch.no_grad():
A__ : int = model(
past_values=batch['''past_values'''] , past_time_features=batch['''past_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , static_categorical_features=batch['''static_categorical_features'''] , ).encoder_last_hidden_state
A__ : Tuple = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , UpperCamelCase__ )
A__ : Dict = torch.tensor(
[[-0.0_7_3_4, -0.9_0_3_6, 0.8_3_5_8], [4.7_1_8_6, 2.4_1_1_3, 1.9_5_8_1], [1.7_9_5_3, 2.3_5_5_8, 1.2_9_7_0]] , device=UpperCamelCase__ )
self.assertTrue(torch.allclose(output[0, :3, :3] , UpperCamelCase__ , atol=UpperCamelCase__ ) )
def __snake_case ( self ):
A__ : int = AutoformerForPrediction.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(UpperCamelCase__ )
A__ : int = prepare_batch('''val-batch.pt''' )
with torch.no_grad():
A__ : Optional[int] = model.generate(
static_categorical_features=batch['''static_categorical_features'''] , past_time_features=batch['''past_time_features'''] , past_values=batch['''past_values'''] , future_time_features=batch['''future_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , )
A__ : Tuple = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , UpperCamelCase__ )
A__ : str = torch.tensor([3_1_3_0.6_7_6_3, 4_0_5_6.5_2_9_3, 7_0_5_3.0_7_8_6] , device=UpperCamelCase__ )
A__ : List[str] = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , UpperCamelCase__ , rtol=1e-1 ) )
def count_inversions_bf(arr):
    """Counts inversions by brute force in O(n^2)."""
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions
def count_inversions_recursive(arr):
    """Counts inversions via merge sort in O(n log n); returns (sorted arr, count)."""
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]
    sorted_p, inversion_p = count_inversions_recursive(p)
    sorted_q, inversions_q = count_inversions_recursive(q)
    sorted_r, cross_inversions = _count_cross_inversions(sorted_p, sorted_q)
    num_inversions = inversion_p + inversions_q + cross_inversions
    return sorted_r, num_inversions
def _count_cross_inversions(p, q):
    """Merges two sorted lists, counting inversions that cross the split."""
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if p[i] > q[j], then p[k] > q[j] for all i < k <= len(p).
            # These are all inversions. The claim emerges from the
            # property that p is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1
    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion
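# Worked example of the merge step above: for p = [2, 10], q = [1, 5], the
# comparison 2 > 1 contributes len(p) - 0 = 2 inversions ((2, 1) and (10, 1)),
# and 10 > 5 contributes one more, so
# _count_cross_inversions([2, 10], [1, 5]) == ([1, 2, 5, 10], 3).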
def main():
    arr_a = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_a)
    _, num_inversions_recursive = count_inversions_recursive(arr_a)
    assert num_inversions_bf == num_inversions_recursive == 8
    print('number of inversions = ', num_inversions_bf)
    # testing an array with zero inversion (a sorted arr_a)
    arr_a.sort()
    num_inversions_bf = count_inversions_bf(arr_a)
    _, num_inversions_recursive = count_inversions_recursive(arr_a)
    assert num_inversions_bf == num_inversions_recursive == 0
    print('number of inversions = ', num_inversions_bf)
    # an empty list should also have zero inversions
    arr_a = []
    num_inversions_bf = count_inversions_bf(arr_a)
    _, num_inversions_recursive = count_inversions_recursive(arr_a)
    assert num_inversions_bf == num_inversions_recursive == 0
    print('number of inversions = ', num_inversions_bf)
if __name__ == "__main__":
main()
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json',
    'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json',
}
class FalconConfig(PretrainedConfig):
    model_type = 'falcon'
    keys_to_ignore_at_inference = ['past_key_values']
    def __init__(
        self, vocab_size=65024, hidden_size=4544, num_hidden_layers=32, num_attention_heads=71,
        layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, hidden_dropout=0.0,
        attention_dropout=0.0, num_kv_heads=None, alibi=False, new_decoder_architecture=False,
        multi_query=True, parallel_attn=True, bias=False, bos_token_id=11, eos_token_id=11, **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop('n_embed', None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
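# Minimal sketch (not part of the original file): with the 7B-style defaults
# above, the derived properties give
#
#   config = FalconConfig()
#   config.head_dim == 4544 // 71 == 64   # True
#   config.rotary                          # True, since alibi defaults to False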
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    """Changes the brightness of a PIL image by `level` (between -255 and 255)."""

    def brightness(c: int) -> float:
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError('level must be between -255.0 (black) and 255.0 (white)')
    return img.point(brightness)
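# Note: brightness(c) algebraically reduces to c + level; `Image.point` clips
# the mapped values to the 0-255 range of 8-bit channels, so no explicit
# clamping is needed here.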
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change brightness to 100
        brigt_img = change_brightness(img, 100)
        brigt_img.save('image_data/lena_brightness.png', format='png')
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    """Converts a PyTorch BERT checkpoint to the original TensorFlow 1.x format."""
    tensors_to_transpose = ('dense.weight', 'attention.self.query', 'attention.self.key', 'attention.self.value')
    var_map = (
        ('layer.', 'layer_'),
        ('word_embeddings.weight', 'word_embeddings'),
        ('position_embeddings.weight', 'position_embeddings'),
        ('token_type_embeddings.weight', 'token_type_embeddings'),
        ('.', '/'),
        ('LayerNorm/weight', 'LayerNorm/gamma'),
        ('LayerNorm/bias', 'LayerNorm/beta'),
        ('weight', 'kernel'),
    )
    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)
    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f'bert/{name}'

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f'Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}')
        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace('-', '_') + '.ckpt'))
def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_name', type=str, required=True, help='model name e.g. bert-base-uncased')
    parser.add_argument(
        '--cache_dir', type=str, default=None, required=False, help='Directory containing pytorch model')
    parser.add_argument('--pytorch_model_path', type=str, required=True, help='/path/to/<pytorch-model-name>.bin')
    parser.add_argument('--tf_cache_dir', type=str, required=True, help='Directory in which to save tensorflow model')
    args = parser.parse_args(raw_args)
    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name, state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir)
    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
if __name__ == "__main__":
main()
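# Example invocation (hypothetical paths; the script filename is an assumption):
#
#   python convert_bert_pytorch_checkpoint_to_original_tf.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path ./pytorch_model.bin \
#       --tf_cache_dir ./tf_ckpt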
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class FeatureExtractionSavingTestMixin:
    test_cast_dtype = None

    def test_feat_extract_to_json_string(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, 'feat_extract.json')
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)
        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)
        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
def __snake_case ( self ):
A__ : List[str] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(UpperCamelCase__ , '''width_multiplier''' ) )
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__=13 , UpperCamelCase__=64 , UpperCamelCase__=2 , UpperCamelCase__=3 , UpperCamelCase__="swish" , UpperCamelCase__=3 , UpperCamelCase__=32 , UpperCamelCase__=0.1 , UpperCamelCase__=0.0_2 , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=10 , UpperCamelCase__=None , UpperCamelCase__=0.2_5 , UpperCamelCase__=0.0 , UpperCamelCase__=0.0 , ):
A__ : Optional[int] = parent
A__ : List[str] = batch_size
A__ : str = image_size
A__ : Dict = patch_size
A__ : Optional[int] = num_channels
A__ : List[Any] = make_divisible(512 * width_multiplier , divisor=8 )
A__ : Optional[Any] = hidden_act
A__ : Any = conv_kernel_size
A__ : str = output_stride
A__ : Any = classifier_dropout_prob
A__ : Union[str, Any] = use_labels
A__ : str = is_training
A__ : Tuple = num_labels
A__ : str = initializer_range
A__ : List[Any] = scope
A__ : List[str] = width_multiplier
A__ : int = ffn_dropout
A__ : Union[str, Any] = attn_dropout
def __snake_case ( self ):
A__ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A__ : Union[str, Any] = None
A__ : Optional[Any] = None
if self.use_labels:
A__ : str = ids_tensor([self.batch_size] , self.num_labels )
A__ : Optional[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
A__ : Any = self.get_config()
return config, pixel_values, labels, pixel_labels
def __snake_case ( self ):
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : str = MobileViTVaModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A__ : Dict = model(UpperCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : Dict = self.num_labels
A__ : List[Any] = MobileViTVaForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A__ : Union[str, Any] = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : Any = self.num_labels
A__ : Optional[Any] = MobileViTVaForSemanticSegmentation(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A__ : Union[str, Any] = model(UpperCamelCase__ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
A__ : Union[str, Any] = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __snake_case ( self ):
A__ : List[str] = self.prepare_config_and_inputs()
A__ , A__ , A__ , A__ : Dict = config_and_inputs
A__ : Tuple = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
_lowerCAmelCase = (
{
"feature-extraction": MobileViTVaModel,
"image-classification": MobileViTVaForImageClassification,
"image-segmentation": MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
def __snake_case ( self ):
A__ : List[str] = MobileViTVaModelTester(self )
A__ : int = MobileViTVaConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ )
def __snake_case ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileViTV2 does not use inputs_embeds''' )
def __snake_case ( self ):
pass
@unittest.skip(reason='''MobileViTV2 does not support input and output embeddings''' )
def __snake_case ( self ):
pass
@unittest.skip(reason='''MobileViTV2 does not output attentions''' )
def __snake_case ( self ):
pass
@require_torch_multi_gpu
@unittest.skip(reason='''Got `CUDA error: misaligned address` for tests after this one being run.''' )
def __snake_case ( self ):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __snake_case ( self ):
pass
def __snake_case ( self ):
A__ , A__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : Optional[int] = model_class(UpperCamelCase__ )
A__ : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ : List[str] = [*signature.parameters.keys()]
A__ : List[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def __snake_case ( self ):
A__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def __snake_case ( self ):
def check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : Optional[Any] = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A__ : Tuple = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
A__ : Union[str, Any] = outputs.hidden_states
A__ : Any = 5
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
A__ : str = 2
for i in range(len(UpperCamelCase__ ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
A__ , A__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : Optional[Any] = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A__ : List[Any] = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
A__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
def __snake_case ( self ):
A__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*UpperCamelCase__ )
@slow
def __snake_case ( self ):
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : Optional[int] = MobileViTVaModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE ( ) -> Dict:
"""simple docstring"""
A__ : str = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __snake_case ( self ):
return (
MobileViTImageProcessor.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' )
if is_vision_available()
else None
)
@slow
def __snake_case ( self ):
A__ : int = MobileViTVaForImageClassification.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' ).to(
UpperCamelCase__ )
A__ : Dict = self.default_image_processor
A__ : List[Any] = prepare_img()
A__ : List[str] = image_processor(images=UpperCamelCase__ , return_tensors='''pt''' ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
A__ : List[Any] = model(**UpperCamelCase__ )
# verify the logits
A__ : int = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
A__ : List[Any] = torch.tensor([-1.63_36e00, -7.32_04e-02, -5.18_83e-01] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) )
@slow
def __snake_case ( self ):
A__ : int = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
A__ : int = model.to(UpperCamelCase__ )
A__ : str = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
A__ : List[str] = prepare_img()
A__ : int = image_processor(images=UpperCamelCase__ , return_tensors='''pt''' ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
A__ : Dict = model(**UpperCamelCase__ )
A__ : Any = outputs.logits
# verify the logits
A__ : Any = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , UpperCamelCase__ )
A__ : Optional[int] = torch.tensor(
[
[[7.0_8_6_3, 7.1_5_2_5, 6.8_2_0_1], [6.6_9_3_1, 6.8_7_7_0, 6.8_9_3_3], [6.2_9_7_8, 7.0_3_6_6, 6.9_6_3_6]],
[[-3.7_1_3_4, -3.6_7_1_2, -3.6_6_7_5], [-3.5_8_2_5, -3.3_5_4_9, -3.4_7_7_7], [-3.3_4_3_5, -3.3_9_7_9, -3.2_8_5_7]],
[[-2.9_3_2_9, -2.8_0_0_3, -2.7_3_6_9], [-3.0_5_6_4, -2.4_7_8_0, -2.0_2_0_7], [-2.6_8_8_9, -1.9_2_9_8, -1.7_6_4_0]],
] , device=UpperCamelCase__ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , UpperCamelCase__ , atol=1e-4 ) )
@slow
def __snake_case ( self ):
A__ : str = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
A__ : Tuple = model.to(UpperCamelCase__ )
A__ : int = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
A__ : Tuple = prepare_img()
A__ : Optional[Any] = image_processor(images=UpperCamelCase__ , return_tensors='''pt''' ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
A__ : Union[str, Any] = model(**UpperCamelCase__ )
A__ : Optional[int] = outputs.logits.detach().cpu()
A__ : List[str] = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ , target_sizes=[(50, 60)] )
A__ : Union[str, Any] = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , UpperCamelCase__ )
A__ : List[str] = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ )
A__ : str = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , UpperCamelCase__ )
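# Without `target_sizes`, post_process_semantic_segmentation keeps the logits'
# native spatial resolution (32x32 here); with it, each predicted map is resized
# to the requested (height, width), as the two assertions above show.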
| 55
|
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_SCREAMING_SNAKE_CASE : Union[str, Any] = '\\n@inproceedings{snover-etal-2006-study,\n title = "A Study of Translation Edit Rate with Targeted Human Annotation",\n author = "Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John",\n booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",\n month = aug # " 8-12",\n year = "2006",\n address = "Cambridge, Massachusetts, USA",\n publisher = "Association for Machine Translation in the Americas",\n url = "https://aclanthology.org/2006.amta-papers.25",\n pages = "223--231",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_SCREAMING_SNAKE_CASE : Tuple = '\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n'
_SCREAMING_SNAKE_CASE : Optional[Any] = '\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n \'score\' (float): TER score (num_edits / sum_ref_lengths * 100)\n \'num_edits\' (int): The cumulative number of edits\n \'ref_length\' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?",\n ... "What did the TER metric user say to the developer?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n ... ["Your jokes are...", "...TERrible"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}\n\n Example 2:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}\n\n Example 3:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}\n\n Example 4:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}\n\n Example 5:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?",\n ... "What did the TER metric user say to the developer?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n ... 
["Your jokes are...", "...TERrible"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class UpperCamelCase__ ( datasets.Metric ):
'''simple docstring'''
def __snake_case ( self ):
if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install "sacrebleu>=1.4.12"`.''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''http://www.cs.umd.edu/~snover/tercom/''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#ter'''] , reference_urls=[
'''https://github.com/jhclark/tercom''',
] , )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = False , UpperCamelCase__ = False , UpperCamelCase__ = False , UpperCamelCase__ = False , ):
A__ : List[Any] = len(references[0] )
if any(len(UpperCamelCase__ ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
A__ : Dict = [[refs[i] for refs in references] for i in range(UpperCamelCase__ )]
A__ : Optional[Any] = TER(
normalized=UpperCamelCase__ , no_punct=UpperCamelCase__ , asian_support=UpperCamelCase__ , case_sensitive=UpperCamelCase__ , )
A__ : str = sb_ter.corpus_score(UpperCamelCase__ , UpperCamelCase__ )
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 55
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_SCREAMING_SNAKE_CASE : Optional[Any] = {
'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}
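# The dict above maps submodule names to their public symbols; `_LazyModule`
# (constructed at the bottom of this file) uses it to defer the actual imports
# until an attribute is first accessed, keeping top-level import cheap.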
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Any = [
'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
'PegasusXForConditionalGeneration',
'PegasusXModel',
'PegasusXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 55
|
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
# TODO Update this
_SCREAMING_SNAKE_CASE : Optional[int] = {
'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = "esm"
def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=768 , UpperCamelCase__=12 , UpperCamelCase__=12 , UpperCamelCase__=3072 , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=1026 , UpperCamelCase__=0.0_2 , UpperCamelCase__=1e-12 , UpperCamelCase__="absolute" , UpperCamelCase__=True , UpperCamelCase__=None , UpperCamelCase__=False , UpperCamelCase__=False , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ , ):
super().__init__(pad_token_id=UpperCamelCase__ , mask_token_id=UpperCamelCase__ , **UpperCamelCase__ )
A__ : Optional[Any] = vocab_size
A__ : int = hidden_size
A__ : List[str] = num_hidden_layers
A__ : Tuple = num_attention_heads
A__ : str = intermediate_size
A__ : List[str] = hidden_dropout_prob
A__ : Optional[Any] = attention_probs_dropout_prob
A__ : int = max_position_embeddings
A__ : List[str] = initializer_range
A__ : List[Any] = layer_norm_eps
A__ : int = position_embedding_type
A__ : Optional[Any] = use_cache
A__ : Optional[int] = emb_layer_norm_before
A__ : List[str] = token_dropout
A__ : Tuple = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info('''No esmfold_config supplied for folding model, using default values.''' )
A__ : List[Any] = EsmFoldConfig()
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A__ : Optional[int] = EsmFoldConfig(**UpperCamelCase__ )
A__ : int = esmfold_config
if vocab_list is None:
logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' )
A__ : Any = get_default_vocab_list()
else:
A__ : Dict = vocab_list
else:
A__ : Optional[Any] = None
A__ : Tuple = None
if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , UpperCamelCase__ ):
raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' )
def __snake_case ( self ):
A__ : Optional[int] = super().to_dict()
if isinstance(self.esmfold_config , UpperCamelCase__ ):
A__ : Dict = self.esmfold_config.to_dict()
return output
@dataclass
class UpperCamelCase__ :
'''simple docstring'''
_lowerCAmelCase = None
_lowerCAmelCase = True
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = 0
_lowerCAmelCase = True
_lowerCAmelCase = False
_lowerCAmelCase = 128
_lowerCAmelCase = None
def __snake_case ( self ):
if self.trunk is None:
A__ : Tuple = TrunkConfig()
elif isinstance(self.trunk , UpperCamelCase__ ):
A__ : List[Any] = TrunkConfig(**self.trunk )
def __snake_case ( self ):
A__ : Optional[int] = asdict(self )
A__ : int = self.trunk.to_dict()
return output
@dataclass
class UpperCamelCase__ :
'''simple docstring'''
_lowerCAmelCase = 48
_lowerCAmelCase = 1_024
_lowerCAmelCase = 128
_lowerCAmelCase = 32
_lowerCAmelCase = 32
_lowerCAmelCase = 32
_lowerCAmelCase = 0
_lowerCAmelCase = 0
_lowerCAmelCase = False
_lowerCAmelCase = 4
_lowerCAmelCase = 128
_lowerCAmelCase = None
def __snake_case ( self ):
if self.structure_module is None:
A__ : str = StructureModuleConfig()
elif isinstance(self.structure_module , UpperCamelCase__ ):
A__ : str = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(F"`max_recycles` should be positive, got {self.max_recycles}." )
if self.sequence_state_dim % self.sequence_head_width != 0:
raise ValueError(
'''`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'''
F" {self.sequence_state_dim} and {self.sequence_head_width}." )
if self.pairwise_state_dim % self.pairwise_head_width != 0:
raise ValueError(
'''`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'''
F" {self.pairwise_state_dim} and {self.pairwise_head_width}." )
A__ : Tuple = self.sequence_state_dim // self.sequence_head_width
A__ : int = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
'''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got'''
F" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}." )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
'''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got'''
F" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}." )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}." )
if self.dropout >= 0.4:
raise ValueError(F"`dropout` should not be greater than 0.4, got {self.dropout}." )
def __snake_case ( self ):
A__ : List[Any] = asdict(self )
A__ : Optional[int] = self.structure_module.to_dict()
return output
@dataclass
class UpperCamelCase__ :
'''simple docstring'''
_lowerCAmelCase = 384
_lowerCAmelCase = 128
_lowerCAmelCase = 16
_lowerCAmelCase = 128
_lowerCAmelCase = 12
_lowerCAmelCase = 4
_lowerCAmelCase = 8
_lowerCAmelCase = 0.1
_lowerCAmelCase = 8
_lowerCAmelCase = 1
_lowerCAmelCase = 2
_lowerCAmelCase = 7
_lowerCAmelCase = 10
_lowerCAmelCase = 1e-8
_lowerCAmelCase = 1e5
def __snake_case ( self ):
return asdict(self )
def SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
"""simple docstring"""
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
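# The tuple above mirrors the 33-token ESM-2 alphabet: special tokens, the 20
# standard amino acids plus ambiguity/rare codes (X, B, U, Z, O), the gap
# symbols "." and "-", and the null and mask tokens.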
| 55
| 1
|
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = (UniPCMultistepScheduler,)
_lowerCAmelCase = (("num_inference_steps", 25),)
def __snake_case ( self , **UpperCamelCase__ ):
A__ : Optional[int] = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
'''solver_type''': '''bh2''',
}
config.update(**UpperCamelCase__ )
return config
def __snake_case ( self , UpperCamelCase__=0 , **UpperCamelCase__ ):
A__ : List[Any] = dict(self.forward_default_kwargs )
A__ : List[Any] = kwargs.pop('''num_inference_steps''' , UpperCamelCase__ )
A__ : Optional[int] = self.dummy_sample
A__ : Tuple = 0.1 * sample
A__ : Dict = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
A__ : int = self.get_scheduler_config(**UpperCamelCase__ )
A__ : Optional[int] = scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residuals
A__ : Dict = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCamelCase__ )
A__ : Optional[int] = scheduler_class.from_pretrained(UpperCamelCase__ )
new_scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residuals
A__ : Dict = dummy_past_residuals[: new_scheduler.config.solver_order]
A__ , A__ : Union[str, Any] = sample, sample
for t in range(UpperCamelCase__ , time_step + scheduler.config.solver_order + 1 ):
A__ : int = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A__ : Dict = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __snake_case ( self , UpperCamelCase__=0 , **UpperCamelCase__ ):
A__ : int = dict(self.forward_default_kwargs )
A__ : List[Any] = kwargs.pop('''num_inference_steps''' , UpperCamelCase__ )
A__ : int = self.dummy_sample
A__ : Optional[int] = 0.1 * sample
A__ : str = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
A__ : Optional[int] = self.get_scheduler_config()
A__ : Union[str, Any] = scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residuals (must be after setting timesteps)
A__ : List[str] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCamelCase__ )
A__ : Dict = scheduler_class.from_pretrained(UpperCamelCase__ )
# set timesteps on the reloaded scheduler first
new_scheduler.set_timesteps(UpperCamelCase__ )
# then copy over dummy past residuals (must happen after setting timesteps)
A__ : int = dummy_past_residuals[: new_scheduler.config.solver_order]
A__ : Optional[Any] = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A__ : Any = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __snake_case ( self , UpperCamelCase__=None , **UpperCamelCase__ ):
if scheduler is None:
A__ : List[str] = self.scheduler_classes[0]
A__ : str = self.get_scheduler_config(**UpperCamelCase__ )
A__ : Dict = scheduler_class(**UpperCamelCase__ )
A__ : Dict = self.scheduler_classes[0]
A__ : Tuple = self.get_scheduler_config(**UpperCamelCase__ )
A__ : List[str] = scheduler_class(**UpperCamelCase__ )
A__ : Tuple = 10
A__ : Tuple = self.dummy_model()
A__ : int = self.dummy_sample_deter
scheduler.set_timesteps(UpperCamelCase__ )
for i, t in enumerate(scheduler.timesteps ):
A__ : Optional[Any] = model(UpperCamelCase__ , UpperCamelCase__ )
A__ : Tuple = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).prev_sample
return sample
def __snake_case ( self ):
A__ : int = dict(self.forward_default_kwargs )
A__ : Dict = kwargs.pop('''num_inference_steps''' , UpperCamelCase__ )
for scheduler_class in self.scheduler_classes:
A__ : Dict = self.get_scheduler_config()
A__ : List[Any] = scheduler_class(**UpperCamelCase__ )
A__ : List[str] = self.dummy_sample
A__ : Dict = 0.1 * sample
if num_inference_steps is not None and hasattr(UpperCamelCase__ , '''set_timesteps''' ):
scheduler.set_timesteps(UpperCamelCase__ )
elif num_inference_steps is not None and not hasattr(UpperCamelCase__ , '''set_timesteps''' ):
A__ : Any = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
A__ : int = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
A__ : Tuple = dummy_past_residuals[: scheduler.config.solver_order]
A__ : Union[str, Any] = scheduler.timesteps[5]
A__ : Optional[int] = scheduler.timesteps[6]
A__ : Dict = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A__ : Tuple = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def __snake_case ( self ):
# make sure that iterating over schedulers with same config names gives same results
# for defaults
A__ : Optional[Any] = UniPCMultistepScheduler(**self.get_scheduler_config() )
A__ : int = self.full_loop(scheduler=UpperCamelCase__ )
A__ : List[Any] = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1e-3
A__ : Union[str, Any] = DPMSolverSinglestepScheduler.from_config(scheduler.config )
A__ : Any = DEISMultistepScheduler.from_config(scheduler.config )
A__ : Union[str, Any] = DPMSolverMultistepScheduler.from_config(scheduler.config )
A__ : str = UniPCMultistepScheduler.from_config(scheduler.config )
A__ : Union[str, Any] = self.full_loop(scheduler=UpperCamelCase__ )
A__ : Union[str, Any] = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1e-3
def __snake_case ( self ):
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=UpperCamelCase__ )
def __snake_case ( self ):
self.check_over_configs(thresholding=UpperCamelCase__ )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=UpperCamelCase__ , prediction_type=UpperCamelCase__ , sample_max_value=UpperCamelCase__ , solver_order=UpperCamelCase__ , solver_type=UpperCamelCase__ , )
def __snake_case ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCamelCase__ )
def __snake_case ( self ):
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=UpperCamelCase__ , solver_type=UpperCamelCase__ , prediction_type=UpperCamelCase__ , )
A__ : List[str] = self.full_loop(
solver_order=UpperCamelCase__ , solver_type=UpperCamelCase__ , prediction_type=UpperCamelCase__ , )
assert not torch.isnan(UpperCamelCase__ ).any(), "Samples have nan numbers"
def __snake_case ( self ):
self.check_over_configs(lower_order_final=UpperCamelCase__ )
self.check_over_configs(lower_order_final=UpperCamelCase__ )
def __snake_case ( self ):
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=UpperCamelCase__ , time_step=0 )
def __snake_case ( self ):
A__ : Optional[int] = self.full_loop()
A__ : Optional[int] = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1e-3
def __snake_case ( self ):
A__ : str = self.full_loop(prediction_type='''v_prediction''' )
A__ : str = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_mean.item() - 0.1_0_1_4 ) < 1e-3
def __snake_case ( self ):
A__ : Tuple = self.scheduler_classes[0]
A__ : Any = self.get_scheduler_config(thresholding=UpperCamelCase__ , dynamic_thresholding_ratio=0 )
A__ : Optional[Any] = scheduler_class(**UpperCamelCase__ )
A__ : int = 10
A__ : str = self.dummy_model()
A__ : str = self.dummy_sample_deter.half()
scheduler.set_timesteps(UpperCamelCase__ )
for i, t in enumerate(scheduler.timesteps ):
A__ : List[Any] = model(UpperCamelCase__ , UpperCamelCase__ )
A__ : int = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).prev_sample
assert sample.dtype == torch.floataa
def __snake_case ( self , **UpperCamelCase__ ):
for scheduler_class in self.scheduler_classes:
A__ : List[Any] = self.get_scheduler_config(**UpperCamelCase__ )
A__ : Optional[Any] = scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
| 55
|
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=1024 , UpperCamelCase__=1024 , UpperCamelCase__=3.6 ):
A__ : str = tokenizer
A__ : int = tokenizer.bos_token_id
A__ : List[Any] = dataset
A__ : Tuple = seq_length
A__ : Any = seq_length * chars_per_token * num_of_sequences
def __iter__( self ):
A__ : Dict = iter(self.dataset )
A__ : Tuple = True
while more_examples:
A__ , A__ : Optional[Any] = [], 0
while True:
if buffer_len >= self.input_characters:
break
try:
buffer.append(next(UpperCamelCase__ )['''content'''] )
buffer_len += len(buffer[-1] )
except StopIteration:
A__ : Dict = False
break
A__ : str = tokenizer(UpperCamelCase__ , truncation=UpperCamelCase__ )['''input_ids''']
A__ : Optional[int] = []
for tokenized_input in tokenized_inputs:
all_token_ids.extend(tokenized_input + [self.concat_token_id] )
for i in range(0 , len(UpperCamelCase__ ) , self.seq_length ):
A__ : Optional[int] = all_token_ids[i : i + self.seq_length]
if len(UpperCamelCase__ ) == self.seq_length:
yield torch.tensor(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] ) -> Any:
"""simple docstring"""
A__ : Any = {'''streaming''': True}
A__ : List[str] = load_dataset(args.dataset_name , split='''train''' , **__UpperCamelCase )
A__ : List[str] = ConstantLengthDataset(__UpperCamelCase , __UpperCamelCase , seq_length=args.seq_length )
A__ : int = DataLoader(__UpperCamelCase , batch_size=args.batch_size )
return eval_dataloader
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[str] ) -> Dict:
"""simple docstring"""
model.eval()
A__ : Dict = []
for step, batch in enumerate(__UpperCamelCase ):
with torch.no_grad():
A__ : Any = model(__UpperCamelCase , labels=__UpperCamelCase )
A__ : Tuple = outputs.loss.repeat(args.batch_size )
losses.append(accelerator.gather(__UpperCamelCase ) )
if args.max_eval_steps > 0 and step >= args.max_eval_steps:
break
A__ : Tuple = torch.mean(torch.cat(__UpperCamelCase ) )
try:
A__ : Optional[Any] = torch.exp(__UpperCamelCase )
except OverflowError:
A__ : Union[str, Any] = float('''inf''' )
return loss.item(), perplexity.item()
# Setup Accelerator
_SCREAMING_SNAKE_CASE : List[Any] = Accelerator()
# Parse configuration
_SCREAMING_SNAKE_CASE : Optional[int] = HfArgumentParser(EvaluationArguments)
_SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args()
set_seed(args.seed)
# Logging
_SCREAMING_SNAKE_CASE : Dict = logging.getLogger(__name__)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
# Load model and tokenizer
_SCREAMING_SNAKE_CASE : Optional[int] = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
_SCREAMING_SNAKE_CASE : List[str] = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
_SCREAMING_SNAKE_CASE : Optional[Any] = create_dataloader(args)
# Prepare everything with our `accelerator`.
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[Any] = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info('Evaluating and saving model after training')
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[int] = evaluate(args)
logger.info(f"""loss/eval: {eval_loss}, perplexity: {perplexity}""")
| 55
| 1
|
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self ):
A__ : List[str] = ''''''
A__ : int = ''''''
A__ : Any = []
A__ : Any = 0
A__ : Optional[int] = 256
A__ : str = 0
A__ : Dict = 0
A__ : List[Any] = 0
A__ : Dict = 0
def __snake_case ( self , UpperCamelCase__ ):
A__ : List[str] = cva.imread(UpperCamelCase__ , 0 )
A__ : List[Any] = copy.deepcopy(self.img )
A__ , A__ , A__ : List[str] = plt.hist(self.img.ravel() , 256 , [0, 256] , label='''x''' )
A__ : List[Any] = np.sum(UpperCamelCase__ )
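# Histogram-equalization core: prk is the probability of gray level i, sk
# accumulates the CDF, and each level is remapped to (L - 1) * CDF, rounded
# to the nearest integer level below.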
for i in range(len(UpperCamelCase__ ) ):
A__ : Union[str, Any] = x[i] / self.k
self.sk += prk
A__ : Any = (self.L - 1) * self.sk
A__ : str = last % 1  # fractional part of the stretched level, used for rounding
A__ : Any = int(last + 1 if self.rem >= 0.5 else last )  # round to the nearest gray level
self.last_list.append(UpperCamelCase__ )
A__ : Optional[int] = int(np.ma.count(self.img ) / self.img[1].size )
A__ : Dict = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
A__ : Optional[Any] = self.img[j][i]
if num != self.last_list[num]:
A__ : List[str] = self.last_list[num]
cva.imwrite('''output_data/output.jpg''' , self.img )
def __snake_case ( self ):
plt.hist(self.img.ravel() , 256 , [0, 256] )
def __snake_case ( self ):
cva.imshow('''Output-Image''' , self.img )
cva.imshow('''Input-Image''' , self.original_image )
cva.waitKey(5000 )
cva.destroyAllWindows()
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(os.path.dirname(__file__), 'image_data/input.jpg')
_SCREAMING_SNAKE_CASE : List[Any] = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 55
|
def SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
"""simple docstring"""
A__ : Optional[Any] = 0
for i in range(1 , 10_01 ):
total += i**i
return str(__UpperCamelCase )[-10:]
if __name__ == "__main__":
print(solution())
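# A minimal modular variant (my addition, assuming the same problem statement):
# since only the last ten digits are needed, pow(i, i, 10**10) keeps every term
# small instead of building the full multi-thousand-digit integers.
def solution_mod(n: int = 1000) -> str:
    modulus = 10**10
    total = sum(pow(i, i, modulus) for i in range(1, n + 1)) % modulus
    return str(total).zfill(10)  # pad in case of leading zeros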
| 55
| 1
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any , __UpperCamelCase : Union[str, Any]=False ) -> List[Any]:
"""simple docstring"""
A__ : Optional[int] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((F"blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
A__ : Optional[int] = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : str , __UpperCamelCase : Dict , __UpperCamelCase : Union[str, Any]=False ) -> str:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
A__ : Dict = ''''''
else:
A__ : Any = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A__ : Optional[Any] = state_dict.pop(F"blocks.{i}.attn.qkv.weight" )
A__ : Tuple = state_dict.pop(F"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
A__ : Dict = in_proj_weight[
: config.hidden_size, :
]
A__ : List[Any] = in_proj_bias[: config.hidden_size]
A__ : Tuple = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A__ : Optional[int] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A__ : List[Any] = in_proj_weight[
-config.hidden_size :, :
]
A__ : Dict = in_proj_bias[-config.hidden_size :]
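# Note: timm stores the fused projection as [q; k; v] stacked along dim 0, so
# the three hidden_size-sized row blocks sliced above correspond to query, key
# and value, in that order.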
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[int] ) -> Any:
"""simple docstring"""
A__ : Any = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(__UpperCamelCase , __UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any , __UpperCamelCase : List[Any] , __UpperCamelCase : int ) -> Union[str, Any]:
"""simple docstring"""
A__ : List[Any] = dct.pop(__UpperCamelCase )
A__ : List[Any] = val
def SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
"""simple docstring"""
A__ : Dict = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
A__ : Dict = Image.open(requests.get(__UpperCamelCase , stream=__UpperCamelCase ).raw )
return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any , __UpperCamelCase : str ) -> Dict:
"""simple docstring"""
A__ : List[str] = ViTConfig()
A__ : Union[str, Any] = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
A__ : Optional[int] = True
A__ : int = int(vit_name[-12:-10] )
A__ : List[Any] = int(vit_name[-9:-6] )
else:
A__ : Any = 10_00
A__ : str = '''huggingface/label-files'''
A__ : Optional[Any] = '''imagenet-1k-id2label.json'''
A__ : Any = json.load(open(hf_hub_download(__UpperCamelCase , __UpperCamelCase , repo_type='''dataset''' ) , '''r''' ) )
A__ : Dict = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
A__ : int = idalabel
A__ : List[str] = {v: k for k, v in idalabel.items()}
A__ : List[str] = int(vit_name[-6:-4] )
A__ : Any = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith('''tiny''' ):
A__ : List[str] = 1_92
A__ : Optional[Any] = 7_68
A__ : Optional[int] = 12
A__ : Tuple = 3
elif vit_name[9:].startswith('''small''' ):
A__ : int = 3_84
A__ : Union[str, Any] = 15_36
A__ : Union[str, Any] = 12
A__ : List[str] = 6
else:
pass
else:
if vit_name[4:].startswith('''small''' ):
A__ : List[Any] = 7_68
A__ : Tuple = 23_04
A__ : List[str] = 8
A__ : Union[str, Any] = 8
elif vit_name[4:].startswith('''base''' ):
pass
elif vit_name[4:].startswith('''large''' ):
A__ : Optional[int] = 10_24
A__ : List[Any] = 40_96
A__ : List[str] = 24
A__ : Optional[Any] = 16
elif vit_name[4:].startswith('''huge''' ):
A__ : Optional[Any] = 12_80
A__ : Dict = 51_20
A__ : Dict = 32
A__ : Optional[Any] = 16
# load original model from timm
A__ : Optional[int] = timm.create_model(__UpperCamelCase , pretrained=__UpperCamelCase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
A__ : Optional[int] = timm_model.state_dict()
if base_model:
remove_classification_head_(__UpperCamelCase )
A__ : List[Any] = create_rename_keys(__UpperCamelCase , __UpperCamelCase )
for src, dest in rename_keys:
rename_key(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
read_in_q_k_v(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# load HuggingFace model
if vit_name[-5:] == "in21k":
A__ : Optional[Any] = ViTModel(__UpperCamelCase ).eval()
else:
A__ : List[str] = ViTForImageClassification(__UpperCamelCase ).eval()
model.load_state_dict(__UpperCamelCase )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
A__ : Optional[Any] = DeiTImageProcessor(size=config.image_size )
else:
A__ : int = ViTImageProcessor(size=config.image_size )
A__ : List[str] = image_processor(images=prepare_img() , return_tensors='''pt''' )
A__ : Dict = encoding['''pixel_values''']
A__ : List[str] = model(__UpperCamelCase )
if base_model:
A__ : List[Any] = timm_model.forward_features(__UpperCamelCase )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(__UpperCamelCase , outputs.pooler_output , atol=1e-3 )
else:
A__ : Optional[int] = timm_model(__UpperCamelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__UpperCamelCase , outputs.logits , atol=1e-3 )
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
print(F"Saving model {vit_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(__UpperCamelCase )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_patch16_224',
type=str,
help='Name of the ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
_SCREAMING_SNAKE_CASE : Any = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 55
|
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self ):
A__ : Dict = inspect.getfile(accelerate.test_utils )
A__ : Any = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps''', '''test_metrics.py'''] )
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
A__ : Tuple = test_metrics
@require_cpu
def __snake_case ( self ):
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
def __snake_case ( self ):
debug_launcher(self.test_metrics.main )
@require_single_gpu
def __snake_case ( self ):
self.test_metrics.main()
@require_multi_gpu
def __snake_case ( self ):
print(F"Found {torch.cuda.device_count()} devices." )
A__ : int = ['''torchrun''', F"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(UpperCamelCase__ , env=os.environ.copy() )
| 55
| 1
|
import collections
import importlib.util
import os
import re
from pathlib import Path
_SCREAMING_SNAKE_CASE : List[str] = 'src/transformers'
# Matches is_xxx_available()
_SCREAMING_SNAKE_CASE : Union[str, Any] = re.compile(r'is\_([a-z_]*)_available()')
# Catches a one-line _import_struct = {xxx}
_SCREAMING_SNAKE_CASE : Tuple = re.compile(r'^_import_structure\s+=\s+\{([^\}]+)\}')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_SCREAMING_SNAKE_CASE : List[Any] = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_SCREAMING_SNAKE_CASE : Optional[int] = re.compile(r'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)')
# Catches a line _import_struct["bla"].append("foo")
_SCREAMING_SNAKE_CASE : Dict = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_SCREAMING_SNAKE_CASE : Any = re.compile(r'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]')
# Catches a line with an object between quotes and a comma: "MyModel",
_SCREAMING_SNAKE_CASE : int = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_SCREAMING_SNAKE_CASE : int = re.compile(r'^\s+\[([^\]]+)\]')
# Catches a line with from foo import bar, bla, boo
_SCREAMING_SNAKE_CASE : List[str] = re.compile(r'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
# Catches a line with try:
_SCREAMING_SNAKE_CASE : Optional[Any] = re.compile(r'^\s*try:')
# Catches a line with else:
_SCREAMING_SNAKE_CASE : Tuple = re.compile(r'^\s*else:')
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> List[str]:
"""simple docstring"""
if _re_test_backend.search(__UpperCamelCase ) is None:
return None
A__ : Dict = [b[0] for b in _re_backend.findall(__UpperCamelCase )]
backends.sort()
return "_and_".join(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Union[str, Any] ) -> str:
"""simple docstring"""
with open(__UpperCamelCase , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
A__ : str = f.readlines()
A__ : Optional[Any] = 0
while line_index < len(__UpperCamelCase ) and not lines[line_index].startswith('''_import_structure = {''' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(__UpperCamelCase ):
return None
# First grab the objects without a specific backend in _import_structure
A__ : Union[str, Any] = []
while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None:
A__ : Optional[Any] = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(__UpperCamelCase ):
A__ : Dict = _re_one_line_import_struct.search(__UpperCamelCase ).groups()[0]
A__ : str = re.findall(r'''\[([^\]]+)\]''' , __UpperCamelCase )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] )
line_index += 1
continue
A__ : Union[str, Any] = _re_import_struct_key_value.search(__UpperCamelCase )
if single_line_import_search is not None:
A__ : Union[str, Any] = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(__UpperCamelCase ) > 0]
objects.extend(__UpperCamelCase )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
line_index += 1
A__ : List[Any] = {'''none''': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('''if TYPE_CHECKING''' ):
# If the line is an if not is_backend_available, we grab all objects associated.
A__ : Union[str, Any] = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
A__ : Optional[Any] = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
A__ : str = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ):
A__ : Tuple = lines[line_index]
if _re_import_struct_add_one.search(__UpperCamelCase ) is not None:
objects.append(_re_import_struct_add_one.search(__UpperCamelCase ).groups()[0] )
elif _re_import_struct_add_many.search(__UpperCamelCase ) is not None:
A__ : Dict = _re_import_struct_add_many.search(__UpperCamelCase ).groups()[0].split(''', ''' )
A__ : Optional[Any] = [obj[1:-1] for obj in imports if len(__UpperCamelCase ) > 0]
objects.extend(__UpperCamelCase )
elif _re_between_brackets.search(__UpperCamelCase ) is not None:
A__ : int = _re_between_brackets.search(__UpperCamelCase ).groups()[0].split(''', ''' )
A__ : str = [obj[1:-1] for obj in imports if len(__UpperCamelCase ) > 0]
objects.extend(__UpperCamelCase )
elif _re_quote_object.search(__UpperCamelCase ) is not None:
objects.append(_re_quote_object.search(__UpperCamelCase ).groups()[0] )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
elif line.startswith(''' ''' * 12 + '''"''' ):
objects.append(line[13:-3] )
line_index += 1
A__ : Any = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
A__ : Union[str, Any] = []
while (
line_index < len(__UpperCamelCase )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('''else''' )
):
A__ : Optional[int] = lines[line_index]
A__ : Union[str, Any] = _re_import.search(__UpperCamelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 8 ):
objects.append(line[8:-2] )
line_index += 1
A__ : str = {'''none''': objects}
# Let's continue with backend-specific objects
while line_index < len(__UpperCamelCase ):
# If the line is an if is_backend_available, we grab all objects associated.
A__ : int = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
A__ : Optional[Any] = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
A__ : Optional[Any] = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ):
A__ : Tuple = lines[line_index]
A__ : Optional[Any] = _re_import.search(__UpperCamelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 12 ):
objects.append(line[12:-2] )
line_index += 1
A__ : List[str] = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any , __UpperCamelCase : Any ) -> int:
"""simple docstring"""
def find_duplicates(__UpperCamelCase : Tuple ):
return [k for k, v in collections.Counter(__UpperCamelCase ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
A__ : Any = []
for key in import_dict_objects.keys():
A__ : Optional[int] = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F"Duplicate _import_structure definitions for: {duplicate_imports}" )
A__ : Union[str, Any] = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}" )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
A__ : List[Any] = '''base imports''' if key == '''none''' else F"{key} backend"
errors.append(F"Differences for {name}:" )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F" {a} in TYPE_HINT but not in _import_structure." )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F" {a} in _import_structure but not in TYPE_HINT." )
return errors
def SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
"""simple docstring"""
A__ : List[Any] = []
for root, _, files in os.walk(__UpperCamelCase ):
if "__init__.py" in files:
A__ : Tuple = os.path.join(__UpperCamelCase , '''__init__.py''' )
A__ : str = parse_init(__UpperCamelCase )
if objects is not None:
A__ : int = analyze_results(*__UpperCamelCase )
if len(__UpperCamelCase ) > 0:
A__ : Any = F"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
failures.append('''\n'''.join(__UpperCamelCase ) )
if len(__UpperCamelCase ) > 0:
raise ValueError('''\n\n'''.join(__UpperCamelCase ) )
def SCREAMING_SNAKE_CASE ( ) -> List[str]:
"""simple docstring"""
A__ : Any = []
for path, directories, files in os.walk(__UpperCamelCase ):
for folder in directories:
# Ignore private modules
if folder.startswith('''_''' ):
directories.remove(__UpperCamelCase )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(__UpperCamelCase ) / folder).glob('''*.py''' ) ) ) == 0:
continue
A__ : List[str] = str((Path(__UpperCamelCase ) / folder).relative_to(__UpperCamelCase ) )
A__ : Optional[int] = short_path.replace(os.path.sep , '''.''' )
submodules.append(__UpperCamelCase )
for fname in files:
if fname == "__init__.py":
continue
A__ : Union[str, Any] = str((Path(__UpperCamelCase ) / fname).relative_to(__UpperCamelCase ) )
A__ : List[Any] = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' )
if len(submodule.split('''.''' ) ) == 1:
submodules.append(__UpperCamelCase )
return submodules
_SCREAMING_SNAKE_CASE : Tuple = [
'convert_pytorch_checkpoint_to_tf2',
'modeling_flax_pytorch_utils',
]
def SCREAMING_SNAKE_CASE ( ) -> str:
"""simple docstring"""
A__ : List[Any] = importlib.util.spec_from_file_location(
'''transformers''' , os.path.join(__UpperCamelCase , '''__init__.py''' ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
A__ : Optional[Any] = spec.loader.load_module()
A__ : Any = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
if len(__UpperCamelCase ) > 0:
A__ : List[Any] = '''\n'''.join(F"- {module}" for module in module_not_registered )
raise ValueError(
'''The following submodules are not properly registered in the main init of Transformers:\n'''
F"{list_of_modules}\n"
'''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 55
|
from numpy import exp, pi, sqrt
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Dict , __UpperCamelCase : float = 0.0 , __UpperCamelCase : float = 1.0 ) -> int:
"""simple docstring"""
return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
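# Hand-checked value: for the standard normal density (mu=0.0, sigma=1.0),
# the function above returns 1 / sqrt(2 * pi) ≈ 0.3989422804014327 at x=0.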
if __name__ == "__main__":
import doctest
doctest.testmod()
| 55
| 1
|
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__=2 , UpperCamelCase__=3 , UpperCamelCase__=4 , UpperCamelCase__=2 , UpperCamelCase__=7 , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=99 , UpperCamelCase__=36 , UpperCamelCase__=3 , UpperCamelCase__=4 , UpperCamelCase__=37 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=512 , UpperCamelCase__=16 , UpperCamelCase__=2 , UpperCamelCase__=0.0_2 , UpperCamelCase__=6 , UpperCamelCase__=6 , UpperCamelCase__=3 , UpperCamelCase__=4 , UpperCamelCase__=None , UpperCamelCase__=1000 , ):
A__ : List[Any] = parent
A__ : Dict = batch_size
A__ : Optional[Any] = num_channels
A__ : Optional[Any] = image_size
A__ : Optional[Any] = patch_size
A__ : List[Any] = text_seq_length
A__ : List[Any] = is_training
A__ : Optional[int] = use_input_mask
A__ : Dict = use_token_type_ids
A__ : List[Any] = use_labels
A__ : int = vocab_size
A__ : Any = hidden_size
A__ : List[Any] = num_hidden_layers
A__ : Dict = num_attention_heads
A__ : int = intermediate_size
A__ : Optional[int] = hidden_act
A__ : Any = hidden_dropout_prob
A__ : Any = attention_probs_dropout_prob
A__ : int = max_position_embeddings
A__ : Optional[Any] = type_vocab_size
A__ : Tuple = type_sequence_label_size
A__ : Tuple = initializer_range
A__ : Dict = coordinate_size
A__ : Optional[Any] = shape_size
A__ : int = num_labels
A__ : Dict = num_choices
A__ : Any = scope
A__ : Optional[Any] = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
A__ : Any = text_seq_length
A__ : Optional[Any] = (image_size // patch_size) ** 2 + 1
A__ : List[Any] = self.text_seq_length + self.image_seq_length
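        # Worked example (added note): with the defaults above (image_size=4,
        # patch_size=2, text_seq_length=7) this yields (4 // 2) ** 2 + 1 = 5
        # visual tokens and a total sequence length of 7 + 5 = 12.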
def __snake_case ( self ):
A__ : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
A__ : Union[str, Any] = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
A__ : Tuple = bbox[i, j, 3]
A__ : Optional[int] = bbox[i, j, 1]
A__ : Optional[Any] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
A__ : str = bbox[i, j, 2]
A__ : List[Any] = bbox[i, j, 0]
A__ : List[Any] = t
A__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A__ : Any = None
if self.use_input_mask:
A__ : int = random_attention_mask([self.batch_size, self.text_seq_length] )
A__ : str = None
if self.use_token_type_ids:
A__ : Union[str, Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
A__ : Dict = None
A__ : List[str] = None
if self.use_labels:
A__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ : Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
A__ : Dict = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : Any = LayoutLMvaModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
# text + image
A__ : List[str] = model(UpperCamelCase__ , pixel_values=UpperCamelCase__ )
A__ : Optional[int] = model(
UpperCamelCase__ , bbox=UpperCamelCase__ , pixel_values=UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ )
A__ : Union[str, Any] = model(UpperCamelCase__ , bbox=UpperCamelCase__ , pixel_values=UpperCamelCase__ , token_type_ids=UpperCamelCase__ )
A__ : Optional[int] = model(UpperCamelCase__ , bbox=UpperCamelCase__ , pixel_values=UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
A__ : Tuple = model(UpperCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
A__ : Tuple = model(pixel_values=UpperCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : List[Any] = self.num_labels
A__ : List[Any] = LayoutLMvaForSequenceClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A__ : Any = model(
UpperCamelCase__ , bbox=UpperCamelCase__ , pixel_values=UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : Dict = self.num_labels
A__ : str = LayoutLMvaForTokenClassification(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A__ : List[str] = model(
UpperCamelCase__ , bbox=UpperCamelCase__ , pixel_values=UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : Optional[Any] = LayoutLMvaForQuestionAnswering(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A__ : Dict = model(
UpperCamelCase__ , bbox=UpperCamelCase__ , pixel_values=UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , start_positions=UpperCamelCase__ , end_positions=UpperCamelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __snake_case ( self ):
A__ : Any = self.prepare_config_and_inputs()
(
(
A__
) , (
A__
) , (
A__
) , (
A__
) , (
A__
) , (
A__
) , (
A__
) , (
A__
) ,
) : List[Any] = config_and_inputs
A__ : List[Any] = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''pixel_values''': pixel_values,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
_lowerCAmelCase = (
{"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
if is_torch_available()
else {}
)
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
# `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
# embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
# the sequence dimension of the text embedding only.
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
return True
def __snake_case ( self ):
A__ : str = LayoutLMvaModelTester(self )
A__ : Union[str, Any] = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=False ):
A__ : str = copy.deepcopy(UpperCamelCase__ )
if model_class in get_values(UpperCamelCase__ ):
A__ : Optional[Any] = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(UpperCamelCase__ , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(UpperCamelCase__ ):
A__ : List[Any] = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase__ )
elif model_class in get_values(UpperCamelCase__ ):
A__ : List[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase__ )
A__ : List[str] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase__ )
elif model_class in [
*get_values(UpperCamelCase__ ),
]:
A__ : int = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase__ )
elif model_class in [
*get_values(UpperCamelCase__ ),
]:
A__ : Optional[Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=UpperCamelCase__ , )
return inputs_dict
def __snake_case ( self ):
self.config_tester.run_common_tests()
def __snake_case ( self ):
A__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def __snake_case ( self ):
A__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
A__ : Optional[Any] = type
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def __snake_case ( self ):
A__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase__ )
def __snake_case ( self ):
A__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCamelCase__ )
def __snake_case ( self ):
A__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCamelCase__ )
@slow
def __snake_case ( self ):
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : Any = LayoutLMvaModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
"""simple docstring"""
A__ : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __snake_case ( self ):
return LayoutLMvaImageProcessor(apply_ocr=UpperCamelCase__ ) if is_vision_available() else None
@slow
def __snake_case ( self ):
A__ : Tuple = LayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' ).to(UpperCamelCase__ )
A__ : Any = self.default_image_processor
A__ : Dict = prepare_img()
A__ : List[str] = image_processor(images=UpperCamelCase__ , return_tensors='''pt''' ).pixel_values.to(UpperCamelCase__ )
A__ : Optional[int] = torch.tensor([[1, 2]] )
A__ : Optional[Any] = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
A__ : Dict = model(
input_ids=input_ids.to(UpperCamelCase__ ) , bbox=bbox.to(UpperCamelCase__ ) , pixel_values=pixel_values.to(UpperCamelCase__ ) , )
# verify the logits
A__ : Union[str, Any] = torch.Size((1, 199, 768) )
self.assertEqual(outputs.last_hidden_state.shape , UpperCamelCase__ )
A__ : Tuple = torch.tensor(
[[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCamelCase__ , atol=1e-4 ) )
| 55
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_bert': ['BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BertConfig', 'BertOnnxConfig'],
    'tokenization_bert': ['BasicTokenizer', 'BertTokenizer', 'WordpieceTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_bert_fast'] = ['BertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_bert'] = [
'BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BertForMaskedLM',
'BertForMultipleChoice',
'BertForNextSentencePrediction',
'BertForPreTraining',
'BertForQuestionAnswering',
'BertForSequenceClassification',
'BertForTokenClassification',
'BertLayer',
'BertLMHeadModel',
'BertModel',
'BertPreTrainedModel',
'load_tf_weights_in_bert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_bert'] = [
'TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBertEmbeddings',
'TFBertForMaskedLM',
'TFBertForMultipleChoice',
'TFBertForNextSentencePrediction',
'TFBertForPreTraining',
'TFBertForQuestionAnswering',
'TFBertForSequenceClassification',
'TFBertForTokenClassification',
'TFBertLMHeadModel',
'TFBertMainLayer',
'TFBertModel',
'TFBertPreTrainedModel',
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_bert_tf'] = ['TFBertTokenizer']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_bert'] = [
'FlaxBertForCausalLM',
'FlaxBertForMaskedLM',
'FlaxBertForMultipleChoice',
'FlaxBertForNextSentencePrediction',
'FlaxBertForPreTraining',
'FlaxBertForQuestionAnswering',
'FlaxBertForSequenceClassification',
'FlaxBertForTokenClassification',
'FlaxBertModel',
'FlaxBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
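    # Note (added illustration): once the lazy module is installed in sys.modules,
    # `from transformers.models.bert import BertModel` defers importing the heavy
    # framework code until the attribute is actually accessed.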
| 55
| 1
|
def SCREAMING_SNAKE_CASE ( string1 : str , string2 : str ) -> int:
    """simple docstring"""
    if len(string1 ) != len(string2 ):
        raise ValueError('''String lengths must match!''' )
    count = 0
    for char1, char2 in zip(string1 , string2 ):
        if char1 != char2:
            count += 1
    return count
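# Usage sketch (added illustration): the classic example pair "karolin" and
# "kathrin" differs in exactly three positions.
assert SCREAMING_SNAKE_CASE('''karolin''' , '''kathrin''' ) == 3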
if __name__ == "__main__":
import doctest
doctest.testmod()
| 55
|
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
_SCREAMING_SNAKE_CASE : List[Any] = get_tests_dir('fixtures/dummy_feature_extractor_config.json')
_SCREAMING_SNAKE_CASE : int = get_tests_dir('fixtures/vocab.json')
_SCREAMING_SNAKE_CASE : Tuple = get_tests_dir('fixtures')
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
def __snake_case ( self ):
A__ : List[Any] = 0
def __snake_case ( self ):
A__ : Dict = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Optional[Any] = WavaVecaConfig()
A__ : Dict = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
# save in new folder
model_config.save_pretrained(UpperCamelCase__ )
processor.save_pretrained(UpperCamelCase__ )
A__ : Any = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(UpperCamelCase__ , os.path.join(UpperCamelCase__ , UpperCamelCase__ ) )
copyfile(UpperCamelCase__ , os.path.join(UpperCamelCase__ , '''vocab.json''' ) )
A__ : List[Any] = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Dict = WavaVecaFeatureExtractor()
A__ : Union[str, Any] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
A__ : Optional[int] = WavaVecaProcessor(UpperCamelCase__ , UpperCamelCase__ )
# save in new folder
processor.save_pretrained(UpperCamelCase__ )
# drop `processor_class` in tokenizer
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , '''r''' ) as f:
A__ : str = json.load(UpperCamelCase__ )
config_dict.pop('''processor_class''' )
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , '''w''' ) as f:
f.write(json.dumps(UpperCamelCase__ ) )
A__ : Optional[int] = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Optional[int] = WavaVecaFeatureExtractor()
A__ : List[Any] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
A__ : str = WavaVecaProcessor(UpperCamelCase__ , UpperCamelCase__ )
# save in new folder
processor.save_pretrained(UpperCamelCase__ )
# drop `processor_class` in feature extractor
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , '''r''' ) as f:
A__ : List[Any] = json.load(UpperCamelCase__ )
config_dict.pop('''processor_class''' )
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , '''w''' ) as f:
f.write(json.dumps(UpperCamelCase__ ) )
A__ : List[Any] = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Any = WavaVecaConfig(processor_class='''Wav2Vec2Processor''' )
model_config.save_pretrained(UpperCamelCase__ )
# copy relevant files
copyfile(UpperCamelCase__ , os.path.join(UpperCamelCase__ , '''vocab.json''' ) )
# create emtpy sample processor
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , '''w''' ) as f:
f.write('''{}''' )
A__ : Union[str, Any] = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(UpperCamelCase__ ):
A__ : Union[str, Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCamelCase__ ):
A__ : str = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__ )
A__ : int = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__ )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
A__ : List[Any] = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
A__ : List[Any] = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
A__ : Dict = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__ , use_fast=UpperCamelCase__ )
A__ : int = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def __snake_case ( self ):
try:
AutoConfig.register('''custom''' , UpperCamelCase__ )
AutoFeatureExtractor.register(UpperCamelCase__ , UpperCamelCase__ )
AutoTokenizer.register(UpperCamelCase__ , slow_tokenizer_class=UpperCamelCase__ )
AutoProcessor.register(UpperCamelCase__ , UpperCamelCase__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCamelCase__ ):
AutoProcessor.register(UpperCamelCase__ , UpperCamelCase__ )
# Now that the config is registered, it can be used as any other config with the auto-API
A__ : Any = CustomFeatureExtractor.from_pretrained(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
A__ : str = os.path.join(UpperCamelCase__ , '''vocab.txt''' )
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
A__ : str = CustomTokenizer(UpperCamelCase__ )
A__ : Optional[Any] = CustomProcessor(UpperCamelCase__ , UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(UpperCamelCase__ )
A__ : Union[str, Any] = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def __snake_case ( self ):
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = False
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = False
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = "AutoFeatureExtractor"
_lowerCAmelCase = "AutoTokenizer"
_lowerCAmelCase = False
try:
AutoConfig.register('''custom''' , UpperCamelCase__ )
AutoFeatureExtractor.register(UpperCamelCase__ , UpperCamelCase__ )
AutoTokenizer.register(UpperCamelCase__ , slow_tokenizer_class=UpperCamelCase__ )
AutoProcessor.register(UpperCamelCase__ , UpperCamelCase__ )
# If remote code is not set, the default is to use local classes.
A__ : List[Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
A__ : Any = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__ )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
A__ : Union[str, Any] = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__ )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def __snake_case ( self ):
A__ : str = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(processor.__class__.__name__ , '''BertTokenizerFast''' )
def __snake_case ( self ):
A__ : Union[str, Any] = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-convnext''' )
self.assertEqual(processor.__class__.__name__ , '''ConvNextImageProcessor''' )
@is_staging_test
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def __snake_case ( cls ):
A__ : List[str] = TOKEN
HfFolder.save_token(UpperCamelCase__ )
@classmethod
def __snake_case ( cls ):
try:
delete_repo(token=cls._token , repo_id='''test-processor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-processor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-processor''' )
except HTTPError:
pass
def __snake_case ( self ):
A__ : Optional[Any] = WavaVecaProcessor.from_pretrained(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(UpperCamelCase__ , '''test-processor''' ) , push_to_hub=UpperCamelCase__ , use_auth_token=self._token )
A__ : List[Any] = WavaVecaProcessor.from_pretrained(F"{USER}/test-processor" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(UpperCamelCase__ , getattr(new_processor.feature_extractor , UpperCamelCase__ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def __snake_case ( self ):
A__ : int = WavaVecaProcessor.from_pretrained(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(UpperCamelCase__ , '''test-processor-org''' ) , push_to_hub=UpperCamelCase__ , use_auth_token=self._token , organization='''valid_org''' , )
A__ : List[str] = WavaVecaProcessor.from_pretrained('''valid_org/test-processor-org''' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(UpperCamelCase__ , getattr(new_processor.feature_extractor , UpperCamelCase__ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def __snake_case ( self ):
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
A__ : Optional[Any] = CustomFeatureExtractor.from_pretrained(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
A__ : List[Any] = os.path.join(UpperCamelCase__ , '''vocab.txt''' )
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
A__ : Union[str, Any] = CustomTokenizer(UpperCamelCase__ )
A__ : List[Any] = CustomProcessor(UpperCamelCase__ , UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(F"{USER}/test-dynamic-processor" , token=self._token )
A__ : Union[str, Any] = Repository(UpperCamelCase__ , clone_from=F"{USER}/test-dynamic-processor" , token=self._token )
processor.save_pretrained(UpperCamelCase__ )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor''',
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(UpperCamelCase__ , '''tokenizer_config.json''' ) ) as f:
A__ : Optional[int] = json.load(UpperCamelCase__ )
self.assertDictEqual(
tokenizer_config['''auto_map'''] , {
'''AutoTokenizer''': ['''custom_tokenization.CustomTokenizer''', None],
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(UpperCamelCase__ , '''custom_feature_extraction.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(UpperCamelCase__ , '''custom_tokenization.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(UpperCamelCase__ , '''custom_processing.py''' ) ) )
repo.push_to_hub()
A__ : Tuple = AutoProcessor.from_pretrained(F"{USER}/test-dynamic-processor" , trust_remote_code=UpperCamelCase__ )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , '''CustomProcessor''' )
| 55
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : int = {
'caidas/swin2sr-classicalsr-x2-64': (
'https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'
),
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = "swin2sr"
_lowerCAmelCase = {
"hidden_size": "embed_dim",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , UpperCamelCase__=64 , UpperCamelCase__=1 , UpperCamelCase__=3 , UpperCamelCase__=180 , UpperCamelCase__=[6, 6, 6, 6, 6, 6] , UpperCamelCase__=[6, 6, 6, 6, 6, 6] , UpperCamelCase__=8 , UpperCamelCase__=2.0 , UpperCamelCase__=True , UpperCamelCase__=0.0 , UpperCamelCase__=0.0 , UpperCamelCase__=0.1 , UpperCamelCase__="gelu" , UpperCamelCase__=False , UpperCamelCase__=0.0_2 , UpperCamelCase__=1e-5 , UpperCamelCase__=2 , UpperCamelCase__=1.0 , UpperCamelCase__="1conv" , UpperCamelCase__="pixelshuffle" , **UpperCamelCase__ , ):
super().__init__(**UpperCamelCase__ )
A__ : Dict = image_size
A__ : Dict = patch_size
A__ : List[str] = num_channels
A__ : Dict = embed_dim
A__ : Dict = depths
A__ : List[str] = len(UpperCamelCase__ )
A__ : Union[str, Any] = num_heads
A__ : Any = window_size
A__ : Tuple = mlp_ratio
A__ : str = qkv_bias
A__ : List[str] = hidden_dropout_prob
A__ : Union[str, Any] = attention_probs_dropout_prob
A__ : List[Any] = drop_path_rate
A__ : Tuple = hidden_act
A__ : Dict = use_absolute_embeddings
A__ : Any = layer_norm_eps
A__ : Any = initializer_range
A__ : Any = upscale
A__ : int = img_range
A__ : Optional[Any] = resi_connection
A__ : Optional[int] = upsampler
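# Note (added illustration): via the `attribute_map` above, reading
# `config.hidden_size` resolves to `config.embed_dim` (180 by default) and
# `config.num_hidden_layers` to the number of stages given by `depths`.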
| 55
|
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
@staticmethod
@abstractmethod
def __snake_case ( UpperCamelCase__ ):
raise NotImplementedError()
@abstractmethod
def __snake_case ( self ):
raise NotImplementedError()
| 55
| 1
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config( model_name : str ) -> UperNetConfig:
    """simple docstring"""
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024
    # set label information
    num_labels = 150
    repo_id = '''huggingface/label-files'''
    filename = '''ade20k-id2label.json'''
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    backbone_config = ConvNextConfig(
        depths=depths , hidden_sizes=hidden_sizes , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
    config = UperNetConfig(
        backbone_config=backbone_config , auxiliary_in_channels=auxiliary_in_channels , num_labels=num_labels , id2label=id2label , label2id=label2id , )
    return config
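# Illustration (added note): for example, get_upernet_config('''upernet-convnext-tiny''')
# builds a ConvNext backbone with depths [3, 3, 9, 3] and hidden sizes
# [96, 192, 384, 768] feeding a 150-label UperNet head with auxiliary_in_channels=384.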
def create_rename_keys( config : UperNetConfig ) -> list:
    """simple docstring"""
    rename_keys = []
# fmt: off
# stem
rename_keys.append(('''backbone.downsample_layers.0.0.weight''', '''backbone.embeddings.patch_embeddings.weight''') )
rename_keys.append(('''backbone.downsample_layers.0.0.bias''', '''backbone.embeddings.patch_embeddings.bias''') )
rename_keys.append(('''backbone.downsample_layers.0.1.weight''', '''backbone.embeddings.layernorm.weight''') )
rename_keys.append(('''backbone.downsample_layers.0.1.bias''', '''backbone.embeddings.layernorm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"backbone.stages.{i}.{j}.gamma", F"backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter") )
rename_keys.append((F"backbone.stages.{i}.{j}.depthwise_conv.weight", F"backbone.encoder.stages.{i}.layers.{j}.dwconv.weight") )
rename_keys.append((F"backbone.stages.{i}.{j}.depthwise_conv.bias", F"backbone.encoder.stages.{i}.layers.{j}.dwconv.bias") )
rename_keys.append((F"backbone.stages.{i}.{j}.norm.weight", F"backbone.encoder.stages.{i}.layers.{j}.layernorm.weight") )
rename_keys.append((F"backbone.stages.{i}.{j}.norm.bias", F"backbone.encoder.stages.{i}.layers.{j}.layernorm.bias") )
rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv1.weight", F"backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight") )
rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv1.bias", F"backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias") )
rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv2.weight", F"backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight") )
rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv2.bias", F"backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias") )
if i > 0:
rename_keys.append((F"backbone.downsample_layers.{i}.0.weight", F"backbone.encoder.stages.{i}.downsampling_layer.0.weight") )
rename_keys.append((F"backbone.downsample_layers.{i}.0.bias", F"backbone.encoder.stages.{i}.downsampling_layer.0.bias") )
rename_keys.append((F"backbone.downsample_layers.{i}.1.weight", F"backbone.encoder.stages.{i}.downsampling_layer.1.weight") )
rename_keys.append((F"backbone.downsample_layers.{i}.1.bias", F"backbone.encoder.stages.{i}.downsampling_layer.1.bias") )
rename_keys.append((F"backbone.norm{i}.weight", F"backbone.hidden_states_norms.stage{i+1}.weight") )
rename_keys.append((F"backbone.norm{i}.bias", F"backbone.hidden_states_norms.stage{i+1}.bias") )
# decode head
rename_keys.extend(
[
('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''),
('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''),
('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''),
('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''),
] )
# fmt: on
return rename_keys
def rename_key( dct : dict , old : str , new : str ) -> None:
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
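# Minimal illustration (added, not in the original script): `rename_key` moves a
# checkpoint entry to its new name in place. `_toy_state` is a hypothetical
# one-entry state dict used only for this sketch.
_toy_state = {'''backbone.norm0.weight''': 0}
rename_key(_toy_state , '''backbone.norm0.weight''' , '''backbone.hidden_states_norms.stage1.weight''' )
assert '''backbone.hidden_states_norms.stage1.weight''' in _toy_state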
def convert_upernet_checkpoint( model_name : str , pytorch_dump_folder_path : str , push_to_hub : bool ) -> None:
    """simple docstring"""
    model_name_to_url = {
'''upernet-convnext-tiny''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth''',
'''upernet-convnext-small''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth''',
'''upernet-convnext-base''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth''',
'''upernet-convnext-large''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth''',
'''upernet-convnext-xlarge''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth''',
}
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='''cpu''' )['''state_dict''']
    config = get_upernet_config(model_name )
    model = UperNetForSemanticSegmentation(config )
    model.eval()
    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        if "bn" in key:
            key = key.replace('''bn''' , '''batch_norm''' )
        state_dict[key] = val
    # rename keys
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    model.load_state_dict(state_dict )
    # verify on image
    url = '''https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'''
    image = Image.open(requests.get(url , stream=True ).raw ).convert('''RGB''' )
    processor = SegformerImageProcessor()
    pixel_values = processor(image , return_tensors='''pt''' ).pixel_values
    with torch.no_grad():
        outputs = model(pixel_values )
if model_name == "upernet-convnext-tiny":
A__ : Union[str, Any] = torch.tensor(
[[-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.7_7_4_6, -8.7_7_4_6, -8.6_1_3_0]] )
elif model_name == "upernet-convnext-small":
A__ : List[str] = torch.tensor(
[[-8.8_2_3_6, -8.8_2_3_6, -8.6_7_7_1], [-8.8_2_3_6, -8.8_2_3_6, -8.6_7_7_1], [-8.7_6_3_8, -8.7_6_3_8, -8.6_2_4_0]] )
elif model_name == "upernet-convnext-base":
A__ : Any = torch.tensor(
[[-8.8_5_5_8, -8.8_5_5_8, -8.6_9_0_5], [-8.8_5_5_8, -8.8_5_5_8, -8.6_9_0_5], [-8.7_6_6_9, -8.7_6_6_9, -8.6_0_2_1]] )
elif model_name == "upernet-convnext-large":
A__ : Union[str, Any] = torch.tensor(
[[-8.6_6_6_0, -8.6_6_6_0, -8.6_2_1_0], [-8.6_6_6_0, -8.6_6_6_0, -8.6_2_1_0], [-8.6_3_1_0, -8.6_3_1_0, -8.5_9_6_4]] )
elif model_name == "upernet-convnext-xlarge":
A__ : List[Any] = torch.tensor(
[[-8.4_9_8_0, -8.4_9_8_0, -8.3_9_7_7], [-8.4_9_8_0, -8.4_9_8_0, -8.3_9_7_7], [-8.4_3_7_9, -8.4_3_7_9, -8.3_4_1_2]] )
print('''Logits:''' , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , __UpperCamelCase , atol=1e-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
        model.save_pretrained(pytorch_dump_folder_path )
print(F"Saving processor to {pytorch_dump_folder_path}" )
        processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
print(F"Pushing model and processor for {model_name} to hub" )
model.push_to_hub(F"openmmlab/{model_name}" )
processor.push_to_hub(F"openmmlab/{model_name}" )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-convnext-tiny',
type=str,
choices=[f"""upernet-convnext-{size}""" for size in ['tiny', 'small', 'base', 'large', 'xlarge']],
help='Name of the ConvNext UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
_SCREAMING_SNAKE_CASE : Tuple = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 55
|
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__=13 , UpperCamelCase__=[30, 30] , UpperCamelCase__=2 , UpperCamelCase__=3 , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=32 , UpperCamelCase__=5 , UpperCamelCase__=4 , UpperCamelCase__=37 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=10 , UpperCamelCase__=0.0_2 , UpperCamelCase__=3 , UpperCamelCase__=None , UpperCamelCase__=8 , UpperCamelCase__=10 , ):
A__ : Optional[int] = parent
A__ : List[Any] = batch_size
A__ : Dict = image_size
A__ : Any = patch_size
A__ : Dict = num_channels
A__ : List[Any] = is_training
A__ : int = use_labels
A__ : Any = hidden_size
A__ : List[str] = num_hidden_layers
A__ : Optional[int] = num_attention_heads
A__ : Optional[Any] = intermediate_size
A__ : str = hidden_act
A__ : str = hidden_dropout_prob
A__ : Optional[int] = attention_probs_dropout_prob
A__ : Optional[int] = type_sequence_label_size
A__ : Any = initializer_range
A__ : Optional[int] = num_labels
A__ : Union[str, Any] = scope
A__ : Union[str, Any] = n_targets
A__ : Dict = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
A__ : int = (image_size[1] // patch_size) * (image_size[0] // patch_size)
A__ : List[str] = num_patches + 1 + self.num_detection_tokens
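        # Worked example (added note): with the defaults above (image_size=[30, 30],
        # patch_size=2, num_detection_tokens=10) there are (30 // 2) * (30 // 2) = 225
        # patches, so the expected sequence length is 225 + 1 + 10 = 236.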
def __snake_case ( self ):
A__ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
A__ : int = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
A__ : Tuple = []
for i in range(self.batch_size ):
A__ : List[Any] = {}
A__ : Tuple = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=UpperCamelCase__ )
A__ : Any = torch.rand(self.n_targets , 4 , device=UpperCamelCase__ )
labels.append(UpperCamelCase__ )
A__ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __snake_case ( self ):
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : Tuple = YolosModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A__ : Optional[Any] = model(UpperCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : Any = YolosForObjectDetection(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A__ : Union[str, Any] = model(pixel_values=UpperCamelCase__ )
A__ : Optional[int] = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
A__ : Union[str, Any] = model(pixel_values=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def __snake_case ( self ):
A__ : Optional[int] = self.prepare_config_and_inputs()
A__ , A__ , A__ : Optional[Any] = config_and_inputs
A__ : Optional[int] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
_lowerCAmelCase = (
{"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
)
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=False ):
A__ : Optional[int] = super()._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
A__ : str = []
for i in range(self.model_tester.batch_size ):
A__ : int = {}
A__ : Dict = torch.ones(
size=(self.model_tester.n_targets,) , device=UpperCamelCase__ , dtype=torch.long )
A__ : Dict = torch.ones(
self.model_tester.n_targets , 4 , device=UpperCamelCase__ , dtype=torch.float )
labels.append(UpperCamelCase__ )
A__ : Dict = labels
return inputs_dict
def __snake_case ( self ):
A__ : List[Any] = YolosModelTester(self )
A__ : List[str] = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 )
def __snake_case ( self ):
self.config_tester.run_common_tests()
def __snake_case ( self ):
# YOLOS does not use inputs_embeds
pass
def __snake_case ( self ):
A__ , A__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : Any = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A__ : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) )
def __snake_case ( self ):
A__ , A__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : List[str] = model_class(UpperCamelCase__ )
A__ : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ : Optional[int] = [*signature.parameters.keys()]
A__ : Optional[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def __snake_case ( self ):
A__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def __snake_case ( self ):
A__ , A__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
A__ : Tuple = True
# in YOLOS, the seq_len is different
A__ : List[Any] = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
A__ : Any = True
A__ : Optional[int] = False
A__ : Optional[Any] = True
A__ : int = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A__ : List[str] = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
A__ : Optional[int] = outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
A__ : Tuple = True
A__ : Optional[Any] = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A__ : Tuple = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
A__ : Tuple = outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
A__ : List[Any] = len(UpperCamelCase__ )
# Check attention is always last and order is fine
A__ : List[str] = True
A__ : List[Any] = True
A__ : int = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A__ : Tuple = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
A__ : Tuple = 1
self.assertEqual(out_len + added_hidden_states , len(UpperCamelCase__ ) )
A__ : List[str] = outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def __snake_case ( self ):
def check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : str = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A__ : int = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
A__ : Optional[Any] = outputs.hidden_states
A__ : int = getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
# YOLOS has a different seq_length
A__ : Union[str, Any] = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
A__ , A__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : int = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A__ : Optional[int] = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
A__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*UpperCamelCase__ )
@slow
def __snake_case ( self ):
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : Union[str, Any] = YolosModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE ( ) -> List[str]:
"""simple docstring"""
A__ : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __snake_case ( self ):
return AutoImageProcessor.from_pretrained('''hustvl/yolos-small''' ) if is_vision_available() else None
@slow
def __snake_case ( self ):
A__ : Tuple = YolosForObjectDetection.from_pretrained('''hustvl/yolos-small''' ).to(UpperCamelCase__ )
A__ : str = self.default_image_processor
A__ : Tuple = prepare_img()
A__ : Tuple = image_processor(images=UpperCamelCase__ , return_tensors='''pt''' ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
A__ : Any = model(inputs.pixel_values )
# verify outputs
A__ : List[Any] = torch.Size((1, 100, 92) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
A__ : Optional[int] = torch.tensor(
[[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] , device=UpperCamelCase__ , )
A__ : Optional[int] = torch.tensor(
[[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] , device=UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , UpperCamelCase__ , atol=1e-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , UpperCamelCase__ , atol=1e-4 ) )
# verify postprocessing
A__ : Dict = image_processor.post_process_object_detection(
UpperCamelCase__ , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
A__ : int = torch.tensor([0.9_9_9_4, 0.9_7_9_0, 0.9_9_6_4, 0.9_9_7_2, 0.9_8_6_1] ).to(UpperCamelCase__ )
A__ : str = [75, 75, 17, 63, 17]
A__ : Tuple = torch.tensor([3_3_5.0_6_0_9, 7_9.3_8_4_8, 3_7_5.4_2_1_6, 1_8_7.2_4_9_5] ).to(UpperCamelCase__ )
self.assertEqual(len(results['''scores'''] ) , 5 )
self.assertTrue(torch.allclose(results['''scores'''] , UpperCamelCase__ , atol=1e-4 ) )
self.assertSequenceEqual(results['''labels'''].tolist() , UpperCamelCase__ )
self.assertTrue(torch.allclose(results['''boxes'''][0, :] , UpperCamelCase__ ) )
| 55
| 1
|
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
_SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
@add_end_docstrings(
SCREAMING_SNAKE_CASE_, R"\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n ", )
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
def __snake_case ( self , UpperCamelCase__ ):
if self.framework == "tf":
A__ : List[str] = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
A__ : Any = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=UpperCamelCase__ )
else:
raise ValueError('''Unsupported framework''' )
return masked_index
def __snake_case ( self , UpperCamelCase__ ):
A__ : Dict = self.get_masked_index(UpperCamelCase__ )
A__ : str = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , F"No mask_token ({self.tokenizer.mask_token}) found on the input" , )
def __snake_case ( self , UpperCamelCase__ ):
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input['''input_ids'''][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(UpperCamelCase__ )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__=None , **UpperCamelCase__ ):
if return_tensors is None:
A__ : str = self.framework
A__ : str = self.tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__ )
self.ensure_exactly_one_mask_token(UpperCamelCase__ )
return model_inputs
def __snake_case ( self , UpperCamelCase__ ):
A__ : Tuple = self.model(**UpperCamelCase__ )
A__ : List[str] = model_inputs['''input_ids''']
return model_outputs
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__=5 , UpperCamelCase__=None ):
# Cap top_k if there are targets
if target_ids is not None and target_ids.shape[0] < top_k:
A__ : Union[str, Any] = target_ids.shape[0]
A__ : str = model_outputs['''input_ids'''][0]
A__ : Tuple = model_outputs['''logits''']
if self.framework == "tf":
A__ : Tuple = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
A__ : Dict = outputs.numpy()
A__ : Tuple = outputs[0, masked_index, :]
A__ : str = stable_softmax(UpperCamelCase__ , axis=-1 )
if target_ids is not None:
A__ : int = tf.gather_nd(tf.squeeze(UpperCamelCase__ , 0 ) , target_ids.reshape(-1 , 1 ) )
A__ : str = tf.expand_dims(UpperCamelCase__ , 0 )
A__ : str = tf.math.top_k(UpperCamelCase__ , k=UpperCamelCase__ )
A__ , A__ : List[str] = topk.values.numpy(), topk.indices.numpy()
else:
A__ : Optional[Any] = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=UpperCamelCase__ ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
A__ : List[Any] = outputs[0, masked_index, :]
A__ : Tuple = logits.softmax(dim=-1 )
if target_ids is not None:
A__ : str = probs[..., target_ids]
A__ , A__ : Tuple = probs.topk(UpperCamelCase__ )
A__ : Union[str, Any] = []
A__ : str = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
A__ : Optional[Any] = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
A__ : List[str] = input_ids.numpy().copy()
if target_ids is not None:
A__ : Any = target_ids[p].tolist()
A__ : List[str] = p
# Filter padding out:
A__ : Dict = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
A__ : Any = self.tokenizer.decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
A__ : str = {'''score''': v, '''token''': p, '''token_str''': self.tokenizer.decode([p] ), '''sequence''': sequence}
row.append(UpperCamelCase__ )
result.append(UpperCamelCase__ )
if single_mask:
return result[0]
return result
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__=None ):
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A__ : Dict = [targets]
try:
A__ : Any = self.tokenizer.get_vocab()
except Exception:
A__ : Tuple = {}
A__ : str = []
for target in targets:
A__ : int = vocab.get(UpperCamelCase__ , UpperCamelCase__ )
if id_ is None:
A__ : Tuple = self.tokenizer(
UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ , max_length=1 , truncation=UpperCamelCase__ , )['''input_ids''']
if len(UpperCamelCase__ ) == 0:
logger.warning(
F"The specified target token `{target}` does not exist in the model vocabulary. "
'''We cannot replace it with anything meaningful, ignoring it''' )
continue
A__ : int = input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
F"The specified target token `{target}` does not exist in the model vocabulary. "
F"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`." )
target_ids.append(id_ )
A__ : str = list(set(UpperCamelCase__ ) )
if len(UpperCamelCase__ ) == 0:
raise ValueError('''At least one target must be provided when passed.''' )
A__ : Optional[int] = np.array(UpperCamelCase__ )
return target_ids
def __snake_case ( self , UpperCamelCase__=None , UpperCamelCase__=None ):
A__ : Tuple = {}
if targets is not None:
A__ : Any = self.get_target_ids(UpperCamelCase__ , UpperCamelCase__ )
A__ : List[Any] = target_ids
if top_k is not None:
A__ : Union[str, Any] = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , '''The tokenizer does not define a `mask_token`.''' )
return {}, {}, postprocess_params
def __call__( self , UpperCamelCase__ , *UpperCamelCase__ , **UpperCamelCase__ ):
A__ : Any = super().__call__(UpperCamelCase__ , **UpperCamelCase__ )
if isinstance(UpperCamelCase__ , UpperCamelCase__ ) and len(UpperCamelCase__ ) == 1:
return outputs[0]
return outputs
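# A minimal usage sketch for the fill-mask pipeline implemented above,
# assuming `transformers` is installed and can fetch the (illustrative)
# `bert-base-uncased` checkpoint. The `targets` argument exercises the
# get_target_ids path; each prediction dict carries the same keys built in
# postprocess: score, token, token_str, sequence.
from transformers import pipeline

fill_mask = pipeline("fill-mask", model="bert-base-uncased")
predictions = fill_mask(
    "Paris is the [MASK] of France.", top_k=3, targets=["capital", "city"]
)
for prediction in predictions:
    print(prediction["token_str"], round(prediction["score"], 4))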
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> int:
"""simple docstring"""
if n == 1 or not isinstance(__UpperCamelCase , __UpperCamelCase ):
return 0
elif n == 2:
return 1
else:
A__ : Any = [0, 1]
for i in range(2 , n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> int:
"""simple docstring"""
A__ : Dict = 0
A__ : Optional[int] = 2
while digits < n:
index += 1
A__ : Dict = len(str(fibonacci(__UpperCamelCase ) ) )
return index
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int = 10_00 ) -> int:
"""simple docstring"""
return fibonacci_digits_index(__UpperCamelCase )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
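# A quick cross-check of the digit-index logic above: the first Fibonacci
# number with 3 digits is F(12) = 144, so the search should return 12.
def _first_fib_index_with_digits(n: int) -> int:
    a, b, index = 0, 1, 1
    while len(str(b)) < n:
        a, b = b, a + b
        index += 1
    return index

assert _first_fib_index_with_digits(3) == 12  # F(12) = 144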
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE : Optional[int] = {
'configuration_deberta': ['DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DebertaConfig', 'DebertaOnnxConfig'],
'tokenization_deberta': ['DebertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : List[Any] = ['DebertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : int = [
'DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'DebertaForMaskedLM',
'DebertaForQuestionAnswering',
'DebertaForSequenceClassification',
'DebertaForTokenClassification',
'DebertaModel',
'DebertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Union[str, Any] = [
'TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDebertaForMaskedLM',
'TFDebertaForQuestionAnswering',
'TFDebertaForSequenceClassification',
'TFDebertaForTokenClassification',
'TFDebertaModel',
'TFDebertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
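# The module above follows the lazy-import pattern: heavy submodules are only
# imported when one of their symbols is first accessed. A stripped-down
# sketch of the idea (a simplified illustration, not the real `_LazyModule`):
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    """Defers importing submodules until an attribute is first accessed."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map every exported symbol to the submodule that defines it.
        self._symbol_to_module = {
            symbol: module
            for module, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, symbol):
        # Only reached when normal attribute lookup fails, i.e. on first use.
        module_name = self._symbol_to_module[symbol]
        module = importlib.import_module("." + module_name, self.__name__)
        return getattr(module, symbol)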
_SCREAMING_SNAKE_CASE : List[str] = range(2, 2_0 + 1)
_SCREAMING_SNAKE_CASE : Optional[Any] = [1_0**k for k in range(ks[-1] + 1)]
_SCREAMING_SNAKE_CASE : dict[int, dict[int, list[list[int]]]] = {}
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any , __UpperCamelCase : Dict , __UpperCamelCase : Any , __UpperCamelCase : Optional[Any] ) -> int:
"""simple docstring"""
A__ : Tuple = sum(a_i[j] for j in range(__UpperCamelCase , len(__UpperCamelCase ) ) )
A__ : Tuple = sum(a_i[j] * base[j] for j in range(min(len(__UpperCamelCase ) , __UpperCamelCase ) ) )
A__ , A__ : Optional[int] = 0, 0
A__ : List[Any] = n - i
A__ : Any = memo.get(__UpperCamelCase )
if sub_memo is not None:
A__ : Optional[int] = sub_memo.get(__UpperCamelCase )
if jumps is not None and len(__UpperCamelCase ) > 0:
# find and make the largest jump without going over
A__ : List[Any] = -1
for _k in range(len(__UpperCamelCase ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
A__ : List[str] = _k
break
if max_jump >= 0:
A__ , A__ , A__ : List[Any] = jumps[max_jump]
# since the difference between jumps is cached, add c
A__ : int = diff + c
for j in range(min(__UpperCamelCase , len(__UpperCamelCase ) ) ):
A__ , A__ : List[str] = divmod(__UpperCamelCase , 10 )
if new_c > 0:
add(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
else:
A__ : List[Any] = []
else:
A__ : Optional[Any] = {c: []}
A__ : int = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
A__ , A__ : str = next_term(__UpperCamelCase , k - 1 , i + dn , __UpperCamelCase )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
A__ , A__ : str = compute(__UpperCamelCase , __UpperCamelCase , i + dn , __UpperCamelCase )
diff += _diff
dn += terms_jumped
A__ : str = sub_memo[c]
# keep jumps sorted by # of terms skipped
A__ : List[Any] = 0
while j < len(__UpperCamelCase ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(__UpperCamelCase , (diff, dn, k) )
return (diff, dn)
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[str] , __UpperCamelCase : int ) -> Any:
"""simple docstring"""
if i >= n:
return 0, i
if k > len(__UpperCamelCase ):
a_i.extend([0 for _ in range(k - len(__UpperCamelCase ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
A__ : Optional[Any] = i
A__ , A__ , A__ : Dict = 0, 0, 0
for j in range(len(__UpperCamelCase ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
A__ : int = ds_c + ds_b
diff += addend
A__ : List[Any] = 0
for j in range(__UpperCamelCase ):
A__ : Optional[Any] = a_i[j] + addend
A__ , A__ : List[str] = divmod(__UpperCamelCase , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return diff, i - start_i
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Dict , __UpperCamelCase : List[Any] , __UpperCamelCase : int ) -> Tuple:
"""simple docstring"""
for j in range(__UpperCamelCase , len(__UpperCamelCase ) ):
A__ : Any = digits[j] + addend
if s >= 10:
A__ , A__ : Union[str, Any] = divmod(__UpperCamelCase , 10 )
A__ : Optional[int] = addend // 10 + quotient
else:
A__ : Any = s
A__ : Dict = addend // 10
if addend == 0:
break
while addend > 0:
A__ , A__ : Dict = divmod(__UpperCamelCase , 10 )
digits.append(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int = 10**15 ) -> int:
"""simple docstring"""
A__ : List[Any] = [1]
A__ : Dict = 1
A__ : Tuple = 0
while True:
A__ , A__ : List[str] = next_term(__UpperCamelCase , 20 , i + dn , __UpperCamelCase )
dn += terms_jumped
if dn == n - i:
break
A__ : List[str] = 0
for j in range(len(__UpperCamelCase ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(f"""{solution() = }""")
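# The jump/memoisation machinery above accelerates the sequence
# a(1) = 1, a(n + 1) = a(n) + digit_sum(a(n)) up to n = 10**15.
# A naive reference implementation, far too slow at that scale but
# convenient for cross-checking small n:
def _naive_term(n: int) -> int:
    a = 1
    for _ in range(n - 1):
        a += sum(int(d) for d in str(a))
    return a

# First terms of the sequence: 1, 2, 4, 8, 16, 23, ...
assert [_naive_term(k) for k in range(1, 7)] == [1, 2, 4, 8, 16, 23]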
import os
import zipfile
import pytest
from datasets.utils.extract import (
BzipaExtractor,
Extractor,
GzipExtractor,
LzaExtractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lza, require_pyazr, require_zstandard
@pytest.mark.parametrize(
'''compression_format, is_archive''' , [
('''7z''', True),
('''bz2''', False),
('''gzip''', False),
('''lz4''', False),
('''tar''', True),
('''xz''', False),
('''zip''', True),
('''zstd''', False),
] , )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Dict , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Dict , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : Dict , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Any , ) -> str:
"""simple docstring"""
A__ : Union[str, Any] = {
'''7z''': (seven_zip_file, SevenZipExtractor),
'''bz2''': (bza_file, BzipaExtractor),
'''gzip''': (gz_file, GzipExtractor),
'''lz4''': (lza_file, LzaExtractor),
'''tar''': (tar_file, TarExtractor),
'''xz''': (xz_file, XzExtractor),
'''zip''': (zip_file, ZipExtractor),
'''zstd''': (zstd_file, ZstdExtractor),
}
A__ , A__ : List[Any] = input_paths_and_base_extractors[compression_format]
if input_path is None:
A__ : Tuple = F"for '{compression_format}' compression_format, "
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(__UpperCamelCase )
assert base_extractor.is_extractable(__UpperCamelCase )
A__ : List[str] = tmp_path / ('''extracted''' if is_archive else '''extracted.txt''')
base_extractor.extract(__UpperCamelCase , __UpperCamelCase )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
A__ : List[str] = file_path.read_text(encoding='''utf-8''' )
else:
A__ : Any = output_path.read_text(encoding='''utf-8''' )
A__ : Any = text_file.read_text(encoding='''utf-8''' )
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
'''compression_format, is_archive''' , [
('''7z''', True),
('''bz2''', False),
('''gzip''', False),
('''lz4''', False),
('''tar''', True),
('''xz''', False),
('''zip''', True),
('''zstd''', False),
] , )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[str] , __UpperCamelCase : Tuple , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Any , __UpperCamelCase : Any , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Tuple , ) -> List[str]:
"""simple docstring"""
A__ : List[Any] = {
'''7z''': seven_zip_file,
'''bz2''': bza_file,
'''gzip''': gz_file,
'''lz4''': lza_file,
'''tar''': tar_file,
'''xz''': xz_file,
'''zip''': zip_file,
'''zstd''': zstd_file,
}
A__ : Any = input_paths[compression_format]
if input_path is None:
A__ : Optional[Any] = F"for '{compression_format}' compression_format, "
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(__UpperCamelCase )
A__ : Dict = Extractor.infer_extractor_format(__UpperCamelCase )
assert extractor_format is not None
A__ : Optional[Any] = tmp_path / ('''extracted''' if is_archive else '''extracted.txt''')
Extractor.extract(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
A__ : Optional[int] = file_path.read_text(encoding='''utf-8''' )
else:
A__ : Union[str, Any] = output_path.read_text(encoding='''utf-8''' )
A__ : Any = text_file.read_text(encoding='''utf-8''' )
assert extracted_file_content == expected_file_content
@pytest.fixture
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : str , __UpperCamelCase : List[str] ) -> Tuple:
"""simple docstring"""
import tarfile
A__ : Any = tmp_path / '''data_dot_dot'''
directory.mkdir()
A__ : Optional[Any] = directory / '''tar_file_with_dot_dot.tar'''
with tarfile.TarFile(__UpperCamelCase , '''w''' ) as f:
f.add(__UpperCamelCase , arcname=os.path.join('''..''' , text_file.name ) )
return path
@pytest.fixture
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
import tarfile
A__ : int = tmp_path / '''data_sym_link'''
directory.mkdir()
A__ : List[Any] = directory / '''tar_file_with_sym_link.tar'''
os.symlink('''..''' , directory / '''subdir''' , target_is_directory=__UpperCamelCase )
with tarfile.TarFile(__UpperCamelCase , '''w''' ) as f:
f.add(str(directory / '''subdir''' ) , arcname='''subdir''' ) # str required by os.readlink on Windows and Python < 3.8
return path
@pytest.mark.parametrize(
'''insecure_tar_file, error_log''' , [('''tar_file_with_dot_dot''', '''illegal path'''), ('''tar_file_with_sym_link''', '''Symlink''')] , )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[str] , __UpperCamelCase : Tuple , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[str] , __UpperCamelCase : str , __UpperCamelCase : Tuple ) -> Union[str, Any]:
"""simple docstring"""
A__ : int = {
'''tar_file_with_dot_dot''': tar_file_with_dot_dot,
'''tar_file_with_sym_link''': tar_file_with_sym_link,
}
A__ : str = insecure_tar_files[insecure_tar_file]
A__ : Dict = tmp_path / '''extracted'''
TarExtractor.extract(__UpperCamelCase , __UpperCamelCase )
assert caplog.text
for record in caplog.records:
assert record.levelname == "ERROR"
assert error_log in record.msg
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : str ) -> List[str]:
"""simple docstring"""
A__ : Any = tmpdir / '''not_a_zip_file'''
# From: https://github.com/python/cpython/pull/5053
A__ : int = (
b'''\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00'''
b'''\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6\'\x00\x00\x00\x15I'''
b'''DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07'''
b'''\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82'''
)
with not_a_zip_file.open('''wb''' ) as f:
f.write(__UpperCamelCase )
assert zipfile.is_zipfile(str(__UpperCamelCase ) ) # is a false positive for `zipfile`
assert not ZipExtractor.is_extractable(__UpperCamelCase ) # but we're right
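# Condensed, the tests above exercise the infer-then-extract flow from
# `datasets.utils.extract`; with a hypothetical local archive path, that
# flow looks like this:
from datasets.utils.extract import Extractor

archive_path = "data/archive.tar.gz"  # assumed to exist locally
extractor_format = Extractor.infer_extractor_format(archive_path)
if extractor_format is not None:
    Extractor.extract(archive_path, "extracted/", extractor_format)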
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] , __UpperCamelCase : int=False ) -> Tuple:
"""simple docstring"""
try:
A__ : Dict = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
A__ : Tuple = default
else:
# KEY is set, convert it to True or False.
try:
A__ : Union[str, Any] = strtobool(__UpperCamelCase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F"If set, {key} must be yes or no." )
return _value
_SCREAMING_SNAKE_CASE : Union[str, Any] = parse_flag_from_env('RUN_SLOW', default=False)
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] ) -> Any:
"""simple docstring"""
return unittest.skip('''Test was skipped''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Tuple ) -> Union[str, Any]:
"""simple docstring"""
return unittest.skipUnless(_run_slow_tests , '''test is slow''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : str ) -> int:
"""simple docstring"""
return unittest.skipUnless(not torch.cuda.is_available() , '''test requires only a CPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] ) -> Tuple:
"""simple docstring"""
return unittest.skipUnless(torch.cuda.is_available() , '''test requires a GPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Dict ) -> List[str]:
"""simple docstring"""
return unittest.skipUnless(is_xpu_available() , '''test requires a XPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Dict ) -> Any:
"""simple docstring"""
return unittest.skipUnless(is_mps_available() , '''test requires a `mps` backend support in `torch`''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Optional[Any]:
"""simple docstring"""
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , '''test requires the Hugging Face suite''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Tuple ) -> Tuple:
"""simple docstring"""
return unittest.skipUnless(is_bnb_available() , '''test requires the bitsandbytes library''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] ) -> List[Any]:
"""simple docstring"""
return unittest.skipUnless(is_tpu_available() , '''test requires TPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Tuple:
"""simple docstring"""
return unittest.skipUnless(torch.cuda.device_count() == 1 , '''test requires a GPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Dict:
"""simple docstring"""
return unittest.skipUnless(torch.xpu.device_count() == 1 , '''test requires a XPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any ) -> str:
"""simple docstring"""
return unittest.skipUnless(torch.cuda.device_count() > 1 , '''test requires multiple GPUs''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Any:
"""simple docstring"""
return unittest.skipUnless(torch.xpu.device_count() > 1 , '''test requires multiple XPUs''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] ) -> int:
"""simple docstring"""
return unittest.skipUnless(is_safetensors_available() , '''test requires safetensors''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] ) -> Optional[Any]:
"""simple docstring"""
return unittest.skipUnless(is_deepspeed_available() , '''test requires DeepSpeed''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any ) -> List[Any]:
"""simple docstring"""
return unittest.skipUnless(is_torch_version('''>=''' , '''1.12.0''' ) , '''test requires torch version >= 1.12.0''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[int]=None , __UpperCamelCase : List[Any]=None ) -> Optional[Any]:
"""simple docstring"""
if test_case is None:
return partial(__UpperCamelCase , version=__UpperCamelCase )
return unittest.skipUnless(is_torch_version('''>=''' , __UpperCamelCase ) , F"test requires torch version >= {version}" )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
return unittest.skipUnless(is_tensorboard_available() , '''test requires Tensorboard''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any ) -> Tuple:
"""simple docstring"""
return unittest.skipUnless(is_wandb_available() , '''test requires wandb''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Tuple ) -> Any:
"""simple docstring"""
return unittest.skipUnless(is_comet_ml_available() , '''test requires comet_ml''' )(__UpperCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
return unittest.skipUnless(
_atleast_one_tracker_available , '''test requires at least one tracker to be available and for `comet_ml` to not be installed''' , )(__UpperCamelCase )
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = True
@classmethod
def __snake_case ( cls ):
A__ : Tuple = tempfile.mkdtemp()
@classmethod
def __snake_case ( cls ):
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def __snake_case ( self ):
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob('''**/*''' ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(UpperCamelCase__ )
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self ):
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self , UpperCamelCase__ ):
A__ : Tuple = mocks if isinstance(UpperCamelCase__ , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : str ) -> Any:
"""simple docstring"""
A__ : int = AcceleratorState()
A__ : Any = tensor[None].clone().to(state.device )
A__ : Optional[int] = gather(__UpperCamelCase ).cpu()
A__ : Any = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , __UpperCamelCase ):
return False
return True
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : List[Any] = returncode
A__ : Union[str, Any] = stdout
A__ : Dict = stderr
async def SCREAMING_SNAKE_CASE ( __UpperCamelCase : str , __UpperCamelCase : Optional[Any] ) -> Any:
"""simple docstring"""
while True:
A__ : Tuple = await stream.readline()
if line:
callback(__UpperCamelCase )
else:
break
async def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[Any]=None , __UpperCamelCase : List[Any]=None , __UpperCamelCase : Tuple=None , __UpperCamelCase : Tuple=False , __UpperCamelCase : List[Any]=False ) -> _RunOutput:
"""simple docstring"""
if echo:
print('''\nRunning: ''' , ''' '''.join(__UpperCamelCase ) )
A__ : int = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=__UpperCamelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=__UpperCamelCase , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
A__ : List[Any] = []
A__ : str = []
def tee(__UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Dict , __UpperCamelCase : List[Any]="" ):
A__ : Optional[Any] = line.decode('''utf-8''' ).rstrip()
sink.append(__UpperCamelCase )
if not quiet:
print(__UpperCamelCase , __UpperCamelCase , file=__UpperCamelCase )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda __UpperCamelCase : tee(__UpperCamelCase , __UpperCamelCase , sys.stdout , label='''stdout:''' ) ) ),
asyncio.create_task(_read_stream(p.stderr , lambda __UpperCamelCase : tee(__UpperCamelCase , __UpperCamelCase , sys.stderr , label='''stderr:''' ) ) ),
] , timeout=__UpperCamelCase , )
return _RunOutput(await p.wait() , __UpperCamelCase , __UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] , __UpperCamelCase : Any=None , __UpperCamelCase : List[Any]=None , __UpperCamelCase : List[str]=1_80 , __UpperCamelCase : List[str]=False , __UpperCamelCase : Dict=True ) -> _RunOutput:
"""simple docstring"""
A__ : Dict = asyncio.get_event_loop()
A__ : Optional[Any] = loop.run_until_complete(
_stream_subprocess(__UpperCamelCase , env=__UpperCamelCase , stdin=__UpperCamelCase , timeout=__UpperCamelCase , quiet=__UpperCamelCase , echo=__UpperCamelCase ) )
A__ : Union[str, Any] = ''' '''.join(__UpperCamelCase )
if result.returncode > 0:
A__ : Optional[Any] = '''\n'''.join(result.stderr )
raise RuntimeError(
F"'{cmd_str}' failed with returncode {result.returncode}\n\n"
F"The combined stderr from workers follows:\n{stderr}" )
return result
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[str] , __UpperCamelCase : List[Any]=False ) -> Dict:
"""simple docstring"""
try:
A__ : List[Any] = subprocess.check_output(__UpperCamelCase , stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(__UpperCamelCase , '''decode''' ):
A__ : Any = output.decode('''utf-8''' )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
F"Command `{' '.join(__UpperCamelCase )}` failed with the following error:\n\n{e.output.decode()}" ) from e
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int = 4_00_00_00 ) -> int:
"""simple docstring"""
A__ : Any = [0, 1]
A__ : Optional[int] = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1] )
if fib[i + 2] > n:
break
i += 1
A__ : int = 0
for j in range(len(__UpperCamelCase ) - 1 ):
if fib[j] % 2 == 0:
total += fib[j]
return total
if __name__ == "__main__":
print(f"""{solution() = }""")
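# An equivalent iterative formulation of the even-Fibonacci sum above,
# useful as a cross-check on small limits:
def _even_fib_sum(limit: int) -> int:
    a, b, total = 1, 2, 0
    while b <= limit:
        if b % 2 == 0:
            total += b
        a, b = b, a + b
    return total

assert _even_fib_sum(100) == 2 + 8 + 34  # = 44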
import numpy as np
_SCREAMING_SNAKE_CASE : Any = [
['a', 'b', 'c', 'd', 'e'],
['f', 'g', 'h', 'i', 'k'],
['l', 'm', 'n', 'o', 'p'],
['q', 'r', 's', 't', 'u'],
['v', 'w', 'x', 'y', 'z'],
]
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self ):
A__ : List[Any] = np.array(UpperCamelCase__ )
def __snake_case ( self , UpperCamelCase__ ):
A__ , A__ : Any = np.where(letter == self.SQUARE )
A__ : int = np.concatenate([indexa + 1, indexa + 1] )
return indexes
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ ):
A__ : Union[str, Any] = self.SQUARE[indexa - 1, indexa - 1]
return letter
def __snake_case ( self , UpperCamelCase__ ):
A__ : List[str] = message.lower()
A__ : str = message.replace(''' ''' , '''''' )
A__ : Union[str, Any] = message.replace('''j''' , '''i''' )
A__ : List[Any] = np.empty((2, len(UpperCamelCase__ )) )
for letter_index in range(len(UpperCamelCase__ ) ):
A__ : Any = self.letter_to_numbers(message[letter_index] )
A__ : Optional[Any] = numbers[0]
A__ : List[str] = numbers[1]
A__ : List[str] = first_step.reshape(2 * len(UpperCamelCase__ ) )
A__ : List[Any] = ''''''
for numbers_index in range(len(UpperCamelCase__ ) ):
A__ : Dict = int(second_step[numbers_index * 2] )
A__ : List[str] = int(second_step[(numbers_index * 2) + 1] )
A__ : Dict = self.numbers_to_letter(UpperCamelCase__ , UpperCamelCase__ )
A__ : Tuple = encoded_message + letter
return encoded_message
def __snake_case ( self , UpperCamelCase__ ):
A__ : str = message.lower()
        A__ : str = message.replace(''' ''' , '''''' )
A__ : List[Any] = np.empty(2 * len(UpperCamelCase__ ) )
for letter_index in range(len(UpperCamelCase__ ) ):
A__ : List[str] = self.letter_to_numbers(message[letter_index] )
A__ : Dict = numbers[0]
A__ : int = numbers[1]
A__ : Optional[Any] = first_step.reshape((2, len(UpperCamelCase__ )) )
A__ : int = ''''''
for numbers_index in range(len(UpperCamelCase__ ) ):
A__ : Tuple = int(second_step[0, numbers_index] )
A__ : Dict = int(second_step[1, numbers_index] )
A__ : List[str] = self.numbers_to_letter(UpperCamelCase__ , UpperCamelCase__ )
A__ : Tuple = decoded_message + letter
return decoded_message
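# A small self-contained sketch of the coordinate lookup at the heart of the
# Bifid cipher above: each letter maps to a (row, column) pair on the 5x5
# Polybius square, the rows and columns are streamed separately, then
# re-paired into letters.
import numpy as np

_SQUARE = np.array(
    [
        ["a", "b", "c", "d", "e"],
        ["f", "g", "h", "i", "k"],
        ["l", "m", "n", "o", "p"],
        ["q", "r", "s", "t", "u"],
        ["v", "w", "x", "y", "z"],
    ]
)


def _coords(letter: str) -> tuple[int, int]:
    rows, cols = np.where(_SQUARE == letter)
    return int(rows[0]), int(cols[0])


row, col = _coords("t")
assert _SQUARE[row, col] == "t"  # "t" sits at row 3, column 3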